Columns:
  source            string,  3 – 92 chars    (file name)
  original_c        string, 26 – 2.25M chars (original source)
  no_omp_formatted  string,  0 – 2.25M chars (formatted, OpenMP directives stripped)
  omp_formatted     string,  0 – 2.25M chars (formatted, OpenMP directives kept)
ccsd_pack.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <math.h> //#include <omp.h> #include "config.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" /* * a * v1 + b * v2.transpose(0,2,1,3) */ void CCmake_0213(double *out, double *v1, double *v2, int count, int m, double a, double b) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, l, n; size_t d2 = m * m; size_t d1 = m * m * m; double *pv1, *pv2, *pout; #pragma omp for schedule (static) for (i = 0; i < count; i++) { for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++) { pout = out + d1*i + d2*j + m*k; pv1 = v1 + d1*i + d2*j + m*k; pv2 = v2 + d1*i + d2*k + m*j; for (l = 0; l < m; l++, n++) { pout[l] = pv1[l] * a + pv2[l] * b; } } } } } } /* * out = v1 + v2.transpose(0,2,1) */ void CCsum021(double *out, double *v1, double *v2, int count, int m) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] + pv2[k*m+j]; } } } } } /* * g2 = a * v1 + b * v2.transpose(0,2,1) */ void CCmake_021(double *out, double *v1, double *v2, int count, int m, double a, double b) { if (a == 1 && b == 1) { return CCsum021(out, v1, v2, count, m); } #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] * a + pv2[k*m+j] * b; } } } } } /* * if matrix B is symmetric for the contraction A_ij B_ij, * Tr(AB) ~ A_ii B_ii + (A_ij + A_ji) B_ij where i > j * This function extract the A_ii and the lower triangluar part of A_ij + A_ji */ void CCprecontract(double *out, double *in, int count, int m, double diagfac) { #pragma omp parallel default(none) \ shared(count, m, in, out, diagfac) { int i, j, k, n; size_t mm = m * m; size_t m2 = m * (m+1) / 2; double *pout, *pin; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + m2 * i; pin = in + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < j; k++, n++) { pout[n] = pin[j*m+k] + pin[k*m+j]; } pout[n] = pin[j*m+j] * diagfac; n++; } } } } /* * if i1 == j1: * eri = unpack_tril(eri, axis=0) * unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3) */ void CCload_eri(double *out, double *eri, int *orbs_slice, int nao) { int i0 = orbs_slice[0]; int i1 = orbs_slice[1]; int j0 = orbs_slice[2]; int j1 = orbs_slice[3]; size_t ni = i1 - i0; size_t nj = j1 - j0; size_t nn = nj * nao; size_t nao_pair = nao * (nao + 1) / 2; #pragma omp parallel default(none) \ shared(out, eri, i1, j1, ni, nj, nn, nao, 
nao_pair) { int i, j, k, l, ij; double *pout; double *buf = malloc(sizeof(double) * nao*nao); #pragma omp for schedule (static) for (ij = 0; ij < ni*nj; ij++) { i = ij / nj; j = ij % nj; NPdunpack_tril(nao, eri+ij*nao_pair, buf, 1); pout = out + (i*nn+j)*nao; for (k = 0; k < nao; k++) { for (l = 0; l < nao; l++) { pout[k*nn+l] = buf[k*nao+l]; } } } free(buf); } } /* * eri put virtual orbital first * [ v ] * [ v . ] * [ v . . ] * [ o . . . ] * [ o . . . . ] */ void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count) { #pragma omp parallel default(none) \ shared(eri, nocc, nvir, count) { int ic, i, j, ij; size_t nmo = nocc + nvir; size_t nmo_pair = nmo * (nmo+1) / 2; size_t nocc_pair = nocc * (nocc+1) /2; size_t nvir_pair = nvir * (nvir+1) /2; double *peri, *pout; double *buf = malloc(sizeof(double) * nocc*nvir); #pragma omp for schedule (static) for (ic = 0; ic < count; ic++) { peri = eri + ic*nmo_pair + nvir_pair; for (i = 0; i < nocc; i++, peri+=nvir+i) { for (j = 0; j < nvir; j++) { buf[i*nvir+j] = peri[j]; } } pout = eri + ic*nmo_pair + nvir_pair; peri = eri + ic*nmo_pair + nvir_pair + nvir; for (ij = 0, i = 0; i < nocc; i++, peri+=nvir+i) { for (j = 0; j <= i; j++, ij++) { pout[ij] = peri[j]; } } pout = eri + ic*nmo_pair + nvir_pair + nocc_pair; NPdcopy(pout, buf, nocc*nvir); } free(buf); } }
#include <stdlib.h> #include <math.h> // #include <omp.h> #include "config.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" /* * a * v1 + b * v2.transpose(0,2,1,3) */ void CCmake_0213(double *out, double *v1, double *v2, int count, int m, double a, double b) { { int i, j, k, l, n; size_t d2 = m * m; size_t d1 = m * m * m; double *pv1, *pv2, *pout; for (i = 0; i < count; i++) { for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++) { pout = out + d1 * i + d2 * j + m * k; pv1 = v1 + d1 * i + d2 * j + m * k; pv2 = v2 + d1 * i + d2 * k + m * j; for (l = 0; l < m; l++, n++) { pout[l] = pv1[l] * a + pv2[l] * b; } } } } } } /* * out = v1 + v2.transpose(0,2,1) */ void CCsum021(double *out, double *v1, double *v2, int count, int m) { { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] + pv2[k * m + j]; } } } } } /* * g2 = a * v1 + b * v2.transpose(0,2,1) */ void CCmake_021(double *out, double *v1, double *v2, int count, int m, double a, double b) { if (a == 1 && b == 1) { return CCsum021(out, v1, v2, count, m); } { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] * a + pv2[k * m + j] * b; } } } } } /* * if matrix B is symmetric for the contraction A_ij B_ij, Tr(AB) ~ A_ii B_ii * + (A_ij + A_ji) B_ij where i > j This function extract the A_ii and the * lower triangluar part of A_ij + A_ji */ void CCprecontract(double *out, double *in, int count, int m, double diagfac) { { int i, j, k, n; size_t mm = m * m; size_t m2 = m * (m + 1) / 2; double *pout, *pin; for (i = 0; i < count; i++) { pout = out + m2 * i; pin = in + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < j; k++, n++) { pout[n] = pin[j * m + k] + pin[k * m + j]; } pout[n] = pin[j * m + j] * diagfac; n++; } } } } /* * if i1 == j1: eri = unpack_tril(eri, axis=0) * unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3) */ void CCload_eri(double *out, double *eri, int *orbs_slice, int nao) { int i0 = orbs_slice[0]; int i1 = orbs_slice[1]; int j0 = orbs_slice[2]; int j1 = orbs_slice[3]; size_t ni = i1 - i0; size_t nj = j1 - j0; size_t nn = nj * nao; size_t nao_pair = nao * (nao + 1) / 2; { int i, j, k, l, ij; double *pout; double *buf = malloc(sizeof(double) * nao * nao); for (ij = 0; ij < ni * nj; ij++) { i = ij / nj; j = ij % nj; NPdunpack_tril(nao, eri + ij * nao_pair, buf, 1); pout = out + (i * nn + j) * nao; for (k = 0; k < nao; k++) { for (l = 0; l < nao; l++) { pout[k * nn + l] = buf[k * nao + l]; } } } free(buf); } } /* * eri put virtual orbital first [ v ] [ v . ] [ v . . ] [ * o . . . ] [ o . . . . ] */ void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count) { { int ic, i, j, ij; size_t nmo = nocc + nvir; size_t nmo_pair = nmo * (nmo + 1) / 2; size_t nocc_pair = nocc * (nocc + 1) / 2; size_t nvir_pair = nvir * (nvir + 1) / 2; double *peri, *pout; double *buf = malloc(sizeof(double) * nocc * nvir); for (ic = 0; ic < count; ic++) { peri = eri + ic * nmo_pair + nvir_pair; for (i = 0; i < nocc; i++, peri += nvir + i) { for (j = 0; j < nvir; j++) { buf[i * nvir + j] = peri[j]; } } pout = eri + ic * nmo_pair + nvir_pair; peri = eri + ic * nmo_pair + nvir_pair + nvir; for (ij = 0, i = 0; i < nocc; i++, peri += nvir + i) { for (j = 0; j <= i; j++, ij++) { pout[ij] = peri[j]; } } pout = eri + ic * nmo_pair + nvir_pair + nocc_pair; NPdcopy(pout, buf, nocc * nvir); } free(buf); } }
#include <stdlib.h> #include <math.h> // #include <omp.h> #include "config.h" #include "np_helper/np_helper.h" #include "vhf/fblas.h" /* * a * v1 + b * v2.transpose(0,2,1,3) */ void CCmake_0213(double *out, double *v1, double *v2, int count, int m, double a, double b) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, l, n; size_t d2 = m * m; size_t d1 = m * m * m; double *pv1, *pv2, *pout; #pragma omp for schedule (static) for (i = 0; i < count; i++) { for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++) { pout = out + d1 * i + d2 * j + m * k; pv1 = v1 + d1 * i + d2 * j + m * k; pv2 = v2 + d1 * i + d2 * k + m * j; for (l = 0; l < m; l++, n++) { pout[l] = pv1[l] * a + pv2[l] * b; } } } } } } /* * out = v1 + v2.transpose(0,2,1) */ void CCsum021(double *out, double *v1, double *v2, int count, int m) { #pragma omp parallel default(none) \ shared(count, m, out, v1, v2) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] + pv2[k * m + j]; } } } } } /* * g2 = a * v1 + b * v2.transpose(0,2,1) */ void CCmake_021(double *out, double *v1, double *v2, int count, int m, double a, double b) { if (a == 1 && b == 1) { return CCsum021(out, v1, v2, count, m); } #pragma omp parallel default(none) \ shared(count, m, out, v1, v2, a, b) { int i, j, k, n; size_t mm = m * m; double *pout, *pv1, *pv2; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + mm * i; pv1 = v1 + mm * i; pv2 = v2 + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < m; k++, n++) { pout[n] = pv1[n] * a + pv2[k * m + j] * b; } } } } } /* * if matrix B is symmetric for the contraction A_ij B_ij, Tr(AB) ~ A_ii B_ii * + (A_ij + A_ji) B_ij where i > j This function extract the A_ii and the * lower triangluar part of A_ij + A_ji */ void CCprecontract(double *out, double *in, int count, int m, double diagfac) { #pragma omp parallel default(none) \ shared(count, m, in, out, diagfac) { int i, j, k, n; size_t mm = m * m; size_t m2 = m * (m + 1) / 2; double *pout, *pin; #pragma omp for schedule (static) for (i = 0; i < count; i++) { pout = out + m2 * i; pin = in + mm * i; for (n = 0, j = 0; j < m; j++) { for (k = 0; k < j; k++, n++) { pout[n] = pin[j * m + k] + pin[k * m + j]; } pout[n] = pin[j * m + j] * diagfac; n++; } } } } /* * if i1 == j1: eri = unpack_tril(eri, axis=0) * unpack_tril(eri).reshape(i1-i0,j1-j0,nao,nao).transpose(0,2,1,3) */ void CCload_eri(double *out, double *eri, int *orbs_slice, int nao) { int i0 = orbs_slice[0]; int i1 = orbs_slice[1]; int j0 = orbs_slice[2]; int j1 = orbs_slice[3]; size_t ni = i1 - i0; size_t nj = j1 - j0; size_t nn = nj * nao; size_t nao_pair = nao * (nao + 1) / 2; #pragma omp parallel default(none) \ shared(out, eri, i1, j1, ni, nj, nn, nao, nao_pair) { int i, j, k, l, ij; double *pout; double *buf = malloc(sizeof(double) * nao * nao); #pragma omp for schedule (static) for (ij = 0; ij < ni * nj; ij++) { i = ij / nj; j = ij % nj; NPdunpack_tril(nao, eri + ij * nao_pair, buf, 1); pout = out + (i * nn + j) * nao; for (k = 0; k < nao; k++) { for (l = 0; l < nao; l++) { pout[k * nn + l] = buf[k * nao + l]; } } } free(buf); } } /* * eri put virtual orbital first [ v ] [ v . ] [ v . . ] [ * o . . . ] [ o . . . . 
] */ void CCsd_sort_inplace(double *eri, int nocc, int nvir, int count) { #pragma omp parallel default(none) \ shared(eri, nocc, nvir, count) { int ic, i, j, ij; size_t nmo = nocc + nvir; size_t nmo_pair = nmo * (nmo + 1) / 2; size_t nocc_pair = nocc * (nocc + 1) / 2; size_t nvir_pair = nvir * (nvir + 1) / 2; double *peri, *pout; double *buf = malloc(sizeof(double) * nocc * nvir); #pragma omp for schedule (static) for (ic = 0; ic < count; ic++) { peri = eri + ic * nmo_pair + nvir_pair; for (i = 0; i < nocc; i++, peri += nvir + i) { for (j = 0; j < nvir; j++) { buf[i * nvir + j] = peri[j]; } } pout = eri + ic * nmo_pair + nvir_pair; peri = eri + ic * nmo_pair + nvir_pair + nvir; for (ij = 0, i = 0; i < nocc; i++, peri += nvir + i) { for (j = 0; j <= i; j++, ij++) { pout[ij] = peri[j]; } } pout = eri + ic * nmo_pair + nvir_pair + nocc_pair; NPdcopy(pout, buf, nocc * nvir); } free(buf); } }
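The CCprecontract kernel above exploits the identity Tr(AB) = sum_i A_ii*B_ii + sum_{i>j} (A_ij + A_ji)*B_ij, which holds whenever B is symmetric. A minimal standalone sketch checking that identity on hypothetical 3x3 inputs (not part of the dataset row):

#include <stdio.h>

/* Check the CCprecontract identity: for symmetric B,
 * Tr(A*B) == sum_i A_ii*B_ii + sum_{i>j} (A_ij + A_ji)*B_ij.
 * The 3x3 inputs are hypothetical test values. */
int main(void)
{
    enum { M = 3 };
    double A[M][M] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
    double B[M][M] = {{1, 4, 5}, {4, 2, 6}, {5, 6, 3}};  /* symmetric */
    double full = 0, packed = 0;

    for (int i = 0; i < M; i++)          /* Tr(A*B) = sum_ij A_ij * B_ji */
        for (int j = 0; j < M; j++)
            full += A[i][j] * B[j][i];

    for (int i = 0; i < M; i++) {        /* diagonal + symmetrised lower part */
        packed += A[i][i] * B[i][i];
        for (int j = 0; j < i; j++)
            packed += (A[i][j] + A[j][i]) * B[i][j];
    }
    printf("full=%g packed=%g\n", full, packed);  /* both print 196 */
    return 0;
}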
DRB099-targetparallelfor2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> /* use of omp target + map + array sections derived from pointers */ void foo (double* a, double* b, int N) { int i; #pragma omp parallel for private(i) for (i=0;i< N ;i++) b[i]=a[i]*(double)i; } int main(int argc, char* argv[]) { int i; int len = 1000; double a[len], b[len]; #pragma omp parallel for private(i) for (i=0; i<len; i++) { a[i]= ((double)i)/2.0; b[i]=0.0; } foo(a, b, len); printf("b[50]=%f\n",b[50]); return 0; }
#include <stdio.h> /* * use of omp target + map + array sections derived from pointers */ void foo(double *a, double *b, int N) { int i; for (i = 0; i < N; i++) b[i] = a[i] * (double)i; } int main(int argc, char *argv[]) { int i; int len = 1000; double a[len], b[len]; for (i = 0; i < len; i++) { a[i] = ((double)i) / 2.0; b[i] = 0.0; } foo(a, b, len); printf("b[50]=%f\n", b[50]); return 0; }
#include <stdio.h> /* * use of omp target + map + array sections derived from pointers */ void foo(double *a, double *b, int N) { int i; #pragma omp parallel for private(i) for (i = 0; i < N; i++) b[i] = a[i] * (double)i; } int main(int argc, char *argv[]) { int i; int len = 1000; double a[len], b[len]; #pragma omp parallel for private(i) for (i = 0; i < len; i++) { a[i] = ((double)i) / 2.0; b[i] = 0.0; } foo(a, b, len); printf("b[50]=%f\n", b[50]); return 0; }
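The comment in foo advertises "omp target + map + array sections derived from pointers", while the body shown runs a host-side parallel for. For comparison, a hedged sketch of how the offloading form of foo could look (assuming an OpenMP 4.0+ compiler with device support; everything else unchanged):

/* Sketch only: offload the loop to a device, mapping array sections
 * derived from the pointer arguments a and b. */
void foo(double *a, double *b, int N)
{
    int i;
#pragma omp target map(to: a[0:N]) map(from: b[0:N])
#pragma omp parallel for private(i)
    for (i = 0; i < N; i++)
        b[i] = a[i] * (double)i;
}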
trmm_x_sky_n_lo_row_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < mat->rows; i++) for(ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT ac = 0; ac < mat->cols; ++ac) { ALPHA_INT start = mat->pointers[ac]; ALPHA_INT end = mat->pointers[ac + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT cr = ac - eles_num + idx; if (ac >= cr) { ALPHA_Number t; alpha_mul_3c(t, alpha, mat->values[ai]); alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]); } idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); for (ALPHA_INT i = 0; i < mat->rows; i++) for (ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta); for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT ac = 0; ac < mat->cols; ++ac) { ALPHA_INT start = mat->pointers[ac]; ALPHA_INT end = mat->pointers[ac + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT cr = ac - eles_num + idx; if (ac >= cr) { ALPHA_Number t; alpha_mul_3c(t, alpha, mat->values[ai]); alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]); } idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < mat->rows; i++) for (ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT ac = 0; ac < mat->cols; ++ac) { ALPHA_INT start = mat->pointers[ac]; ALPHA_INT end = mat->pointers[ac + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT cr = ac - eles_num + idx; if (ac >= cr) { ALPHA_Number t; alpha_mul_3c(t, alpha, mat->values[ai]); alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]); } idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
solution-openmp.c
// Translate this file with // // g++ -O3 --std=c++11 assignment-2019.c -o assignment // // Run it with // // ./assignment // // There should be a result.pvd file that you can open with Paraview. // Sometimes, Paraview requires to select the representation "Point Gaussian" // to see something meaningful. // // (C) 2019 Tobias Weinzierl #include <fstream> #include <sstream> #include <iostream> #include <string> #include <cmath> #include <limits> #ifdef _OPENMP #include <omp.h> /* use OpenMP only if needed */ #endif double t = 0; double tFinal = 0; double tPlot = 0; double tPlotDelta = 0; int NumberOfBodies = 0; /** * Pointer to pointers. Each pointer in turn points to three coordinates, i.e. * each pointer represents one molecule/particle/body. You are allowed to make * AoS vs SoA optimisations. Just keep in mind that such modifications are often * time-consuming (be economic with your investments) and that the output of the * code may not change! */ double** x; /** * Equivalent to x storing the velocities. */ double** v; /** * Global time step size used. */ double timeStepSize = 0.0001; /** * Maximum velocity of all particles. */ double maxV; /** * Minimum distance between two elements. */ double minDx; /** * Set up scenario from the command line. * * This operation is not to be changed in the assignment. */ void setUp(int argc, char** argv) { NumberOfBodies = (argc-2) / 6; x = new double*[NumberOfBodies]; v = new double*[NumberOfBodies]; int readArgument = 1; tPlotDelta = std::stof(argv[readArgument]); readArgument++; tFinal = std::stof(argv[readArgument]); readArgument++; for (int i=0; i<NumberOfBodies; i++) { x[i] = new double[3]; v[i] = new double[3]; x[i][0] = std::stof(argv[readArgument]); readArgument++; x[i][1] = std::stof(argv[readArgument]); readArgument++; x[i][2] = std::stof(argv[readArgument]); readArgument++; v[i][0] = std::stof(argv[readArgument]); readArgument++; v[i][1] = std::stof(argv[readArgument]); readArgument++; v[i][2] = std::stof(argv[readArgument]); readArgument++; } std::cout << "created setup with " << NumberOfBodies << " bodies" << std::endl; if (tPlotDelta<=0.0) { std::cout << "plotting switched off" << std::endl; tPlot = tFinal + 1.0; } else { std::cout << "plot initial setup plus every " << tPlotDelta << " time units" << std::endl; tPlot = 0.0; } } std::ofstream videoFile; /** * This operation is not to be changed in the assignment. */ void openParaviewVideoFile() { videoFile.open( "result.pvd" ); videoFile << "<?xml version=\"1.0\"?>" << std::endl << "<VTKFile type=\"Collection\" version=\"0.1\" byte_order=\"LittleEndian\" compressor=\"vtkZLibDataCompressor\">" << std::endl << "<Collection>"; } /** * This operation is not to be changed in the assignment. */ void closeParaviewVideoFile() { videoFile << "</Collection>" << "</VTKFile>" << std::endl; } /** * The file format is documented at http://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf * * This operation is not to be changed in the assignment. 
*/ void printParaviewSnapshot() { static int counter = -1; counter++; std::stringstream filename; filename << "result-" << counter << ".vtp"; std::ofstream out( filename.str().c_str() ); out << "<VTKFile type=\"PolyData\" >" << std::endl << "<PolyData>" << std::endl << " <Piece NumberOfPoints=\"" << NumberOfBodies << "\">" << std::endl << " <Points>" << std::endl << " <DataArray type=\"Float32\" NumberOfComponents=\"3\" format=\"ascii\">"; for (int i=0; i<NumberOfBodies; i++) { out << x[i][0] << " " << x[i][1] << " " << x[i][2] << " "; } out << " </DataArray>" << std::endl << " </Points>" << std::endl << " </Piece>" << std::endl << "</PolyData>" << std::endl << "</VTKFile>" << std::endl; videoFile << "<DataSet timestep=\"" << counter << "\" group=\"\" part=\"0\" file=\"" << filename.str() << "\"/>" << std::endl; } /** * This is the operation you should primarily change in the assignment. Please * keep to the method topology, i.e. do not subdivide further. Also try to have * as many modifications as possible in this part - some initialisation/clean up * likely goes somewhere else, but the main stuff should be done here. * * See array documentations, too. */ void updateBody() { maxV = 0.0; minDx = std::numeric_limits<double>::max(); const double sigma = 3.4e-10; const double epsilon = 1.64e-21; const double mass = 39.948; double* force0 = new double[NumberOfBodies]; double* force1 = new double[NumberOfBodies]; double* force2 = new double[NumberOfBodies]; #pragma omp parallel { // double tempDx = 0; #pragma omp for for (int i=0; i<NumberOfBodies; i++) { force0[i] = 0.0; force1[i] = 0.0; force2[i] = 0.0; #pragma omp parallel for reduction(min: minDx) for (int j=0; j<NumberOfBodies; j++) { if (i!=j) { const double distance = sqrt( (x[j][0]-x[i][0]) * (x[j][0]-x[i][0]) + (x[j][1]-x[i][1]) * (x[j][1]-x[i][1]) + (x[j][2]-x[i][2]) * (x[j][2]-x[i][2]) ); minDx = std::min( minDx,distance ); double quantity = 4.0 * epsilon * ( -12.0 * std::pow(sigma,12.0) / std::pow(distance,12.0) + 6.0 * std::pow(sigma,6.0) / std::pow(distance,6.0) ) / distance; force0[i] += (x[j][0]-x[i][0]) * quantity / distance ; force1[i] += (x[j][1]-x[i][1]) * quantity / distance ; force2[i] += (x[j][2]-x[i][2]) * quantity / distance ; } } } //#pragma omp critical // { // minDx = std::min(minDx,tempDx); // } } #pragma omp parallel for for (int i=0; i<NumberOfBodies; i++) { x[i][0] = x[i][0] + timeStepSize * v[i][0]; x[i][1] = x[i][1] + timeStepSize * v[i][1]; x[i][2] = x[i][2] + timeStepSize * v[i][2]; } //#pragma omp parallel // { // double tempV = 0; #pragma omp parallel for reduction(max: maxV) for (int i=0; i<NumberOfBodies; i++) { v[i][0] = v[i][0] + timeStepSize * force0[i] / mass; v[i][1] = v[i][1] + timeStepSize * force1[i] / mass; v[i][2] = v[i][2] + timeStepSize * force2[i] / mass; double thisV = std::sqrt( v[i][0]*v[i][0] + v[i][1]*v[i][1] + v[i][2]*v[i][2] ); maxV = std::max(maxV,thisV); } //#pragma omp critical // { // maxV = std::max(tempV,maxV); // } // } delete[] force0; delete[] force1; delete[] force2; t += timeStepSize; } /** * Main routine. * * Not to be changed in assignment. */ int main(int argc, char** argv) { if (argc==1) { std::cerr << "usage: " + std::string(argv[0]) + " snapshot final-time objects" << std::endl << " snapshot interval after how many time units to plot. 
Use 0 to switch off plotting" << std::endl << " final-time simulated time (greater 0)" << std::endl << std::endl << "Examples:" << std::endl << "10.0 10000.0 0 0 0 0 0 0 2e-9 0 0 0 0 0 0.9e-9 1e-9 0 0 0 0 \t Three body setup" << std::endl << std::endl; return -1; } else if ( (argc-3)%6!=0 ) { std::cerr << "error in arguments: each planet is given by six entries (position, velocity)" << std::endl; return -2; } setUp(argc,argv); openParaviewVideoFile(); int snapshotCounter = 0; if (t > tPlot) { printParaviewSnapshot(); std::cout << "plotted initial setup" << std::endl; tPlot = tPlotDelta; } int timeStepCounter = 0; while (t<=tFinal) { updateBody(); timeStepCounter++; if (t >= tPlot) { printParaviewSnapshot(); std::cout << "plot next snapshot" << ",\t time step=" << timeStepCounter << ",\t t=" << t << ",\t dt=" << timeStepSize << ",\t v_max=" << maxV << ",\t dx_min=" << minDx << std::endl; tPlot += tPlotDelta; } } closeParaviewVideoFile(); return 0; }
// Translate this file with // // g++ -O3 --std=c++11 assignment-2019.c -o assignment // // Run it with // // ./assignment // // There should be a result.pvd file that you can open with Paraview. // Sometimes, Paraview requires to select the representation "Point Gaussian" // to see something meaningful. // // (C) 2019 Tobias Weinzierl #include <fstream> #include <sstream> #include <iostream> #include <string> #include <cmath> #include <limits> double t = 0; double tFinal = 0; double tPlot = 0; double tPlotDelta = 0; int NumberOfBodies = 0; /** * Pointer to pointers. Each pointer in turn points to three coordinates, i.e. * each pointer represents one molecule/particle/body. You are allowed to make * AoS vs SoA optimisations. Just keep in mind that such modifications are often * time-consuming (be economic with your investments) and that the output of the * code may not change! */ double **x; /** * Equivalent to x storing the velocities. */ double **v; /** * Global time step size used. */ double timeStepSize = 0.0001; /** * Maximum velocity of all particles. */ double maxV; /** * Minimum distance between two elements. */ double minDx; /** * Set up scenario from the command line. * * This operation is not to be changed in the assignment. */ void setUp(int argc, char **argv) { NumberOfBodies = (argc - 2) / 6; x = new double *[NumberOfBodies]; v = new double *[NumberOfBodies]; int readArgument = 1; tPlotDelta = std::stof(argv[readArgument]); readArgument++; tFinal = std::stof(argv[readArgument]); readArgument++; for (int i = 0; i < NumberOfBodies; i++) { x[i] = new double[3]; v[i] = new double[3]; x[i][0] = std::stof(argv[readArgument]); readArgument++; x[i][1] = std::stof(argv[readArgument]); readArgument++; x[i][2] = std::stof(argv[readArgument]); readArgument++; v[i][0] = std::stof(argv[readArgument]); readArgument++; v[i][1] = std::stof(argv[readArgument]); readArgument++; v[i][2] = std::stof(argv[readArgument]); readArgument++; } std::cout << "created setup with " << NumberOfBodies << " bodies" << std::endl; if (tPlotDelta <= 0.0) { std::cout << "plotting switched off" << std::endl; tPlot = tFinal + 1.0; } else { std::cout << "plot initial setup plus every " << tPlotDelta << " time units" << std::endl; tPlot = 0.0; } } std::ofstream videoFile; /** * This operation is not to be changed in the assignment. */ void openParaviewVideoFile() { videoFile.open("result.pvd"); videoFile << "<?xml version=\"1.0\"?>" << std::endl << "<VTKFile type=\"Collection\" version=\"0.1\" byte_order=\"LittleEndian\" compressor=\"vtkZLibDataCompressor\">" << std::endl << "<Collection>"; } /** * This operation is not to be changed in the assignment. */ void closeParaviewVideoFile() { videoFile << "</Collection>" << "</VTKFile>" << std::endl; } /** * The file format is documented at http://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf * * This operation is not to be changed in the assignment. */ void printParaviewSnapshot() { static int counter = -1; counter++; std::stringstream filename; filename << "result-" << counter << ".vtp"; std::ofstream out(filename.str().c_str()); out << "<VTKFile type=\"PolyData\" >" << std::endl << "<PolyData>" << std::endl << " <Piece NumberOfPoints=\"" << NumberOfBodies << "\">" << std::endl << " <Points>" << std::endl << " <DataArray type=\"Float32\" NumberOfComponents=\"3\" format=\"ascii\">"; for (int i = 0; i < NumberOfBodies; i++) { out << x[i][0] << " " << x[i][1] << " " << x[i][2] << " "; } out << " </DataArray>" << std::endl << " </Points>" << std::endl << " </Piece>" << std::endl << "</PolyData>" << std::endl << "</VTKFile>" << std::endl; videoFile << "<DataSet timestep=\"" << counter << "\" group=\"\" part=\"0\" file=\"" << filename.str() << "\"/>" << std::endl; } /** * This is the operation you should primarily change in the assignment. Please * keep to the method topology, i.e. do not subdivide further. Also try to have * as many modifications as possible in this part - some initialisation/clean up * likely goes somewhere else, but the main stuff should be done here. * * See array documentations, too. */ void updateBody() { maxV = 0.0; minDx = std::numeric_limits<double>::max(); const double sigma = 3.4e-10; const double epsilon = 1.64e-21; const double mass = 39.948; double *force0 = new double[NumberOfBodies]; double *force1 = new double[NumberOfBodies]; double *force2 = new double[NumberOfBodies]; // double tempDx = 0; for (int i = 0; i < NumberOfBodies; i++) { force0[i] = 0.0; force1[i] = 0.0; force2[i] = 0.0; for (int j = 0; j < NumberOfBodies; j++) { if (i != j) { const double distance = sqrt( (x[j][0] - x[i][0]) * (x[j][0] - x[i][0]) + (x[j][1] - x[i][1]) * (x[j][1] - x[i][1]) + (x[j][2] - x[i][2]) * (x[j][2] - x[i][2]) ); minDx = std::min(minDx, distance); double quantity = 4.0 * epsilon * (-12.0 * std::pow(sigma, 12.0) / std::pow(distance, 12.0) + 6.0 * std::pow(sigma, 6.0) / std::pow(distance, 6.0)) / distance; force0[i] += (x[j][0] - x[i][0]) * quantity / distance; force1[i] += (x[j][1] - x[i][1]) * quantity / distance; force2[i] += (x[j][2] - x[i][2]) * quantity / distance; } } } // // { // minDx = std::min(minDx, tempDx); // } for (int i = 0; i < NumberOfBodies; i++) { x[i][0] = x[i][0] + timeStepSize * v[i][0]; x[i][1] = x[i][1] + timeStepSize * v[i][1]; x[i][2] = x[i][2] + timeStepSize * v[i][2]; } // // { // double tempV = 0; for (int i = 0; i < NumberOfBodies; i++) { v[i][0] = v[i][0] + timeStepSize * force0[i] / mass; v[i][1] = v[i][1] + timeStepSize * force1[i] / mass; v[i][2] = v[i][2] + timeStepSize * force2[i] / mass; double thisV = std::sqrt(v[i][0] * v[i][0] + v[i][1] * v[i][1] + v[i][2] * v[i][2]); maxV = std::max(maxV, thisV); } // // { // maxV = std::max(tempV, maxV); // } // } delete[] force0; delete[] force1; delete[] force2; t += timeStepSize; } /** * Main routine. * * Not to be changed in assignment. */ int main(int argc, char **argv) { if (argc == 1) { std::cerr << "usage: " + std::string(argv[0]) + " snapshot final-time objects" << std::endl << " snapshot interval after how many time units to plot. Use 0 to switch off plotting" << std::endl << " final-time simulated time (greater 0)" << std::endl << std::endl << "Examples:" << std::endl << "10.0 10000.0 0 0 0 0 0 0 2e-9 0 0 0 0 0 0.9e-9 1e-9 0 0 0 0 \t Three body setup" << std::endl << std::endl; return -1; } else if ((argc - 3) % 6 != 0) { std::cerr << "error in arguments: each planet is given by six entries (position, velocity)" << std::endl; return -2; } setUp(argc, argv); openParaviewVideoFile(); int snapshotCounter = 0; if (t > tPlot) { printParaviewSnapshot(); std::cout << "plotted initial setup" << std::endl; tPlot = tPlotDelta; } int timeStepCounter = 0; while (t <= tFinal) { updateBody(); timeStepCounter++; if (t >= tPlot) { printParaviewSnapshot(); std::cout << "plot next snapshot" << ",\t time step=" << timeStepCounter << ",\t t=" << t << ",\t dt=" << timeStepSize << ",\t v_max=" << maxV << ",\t dx_min=" << minDx << std::endl; tPlot += tPlotDelta; } } closeParaviewVideoFile(); return 0; }
// Translate this file with // // g++ -O3 --std=c++11 assignment-2019.c -o assignment // // Run it with // // ./assignment // // There should be a result.pvd file that you can open with Paraview. // Sometimes, Paraview requires to select the representation "Point Gaussian" // to see something meaningful. // // (C) 2019 Tobias Weinzierl #include <fstream> #include <sstream> #include <iostream> #include <string> #include <cmath> #include <limits> #ifdef _OPENMP #include <omp.h> /* use OpenMP only if needed */ #endif double t = 0; double tFinal = 0; double tPlot = 0; double tPlotDelta = 0; int NumberOfBodies = 0; /** * Pointer to pointers. Each pointer in turn points to three coordinates, i.e. * each pointer represents one molecule/particle/body. You are allowed to make * AoS vs SoA optimisations. Just keep in mind that such modifications are often * time-consuming (be economic with your investments) and that the output of the * code may not change! */ double **x; /** * Equivalent to x storing the velocities. */ double **v; /** * Global time step size used. */ double timeStepSize = 0.0001; /** * Maximum velocity of all particles. */ double maxV; /** * Minimum distance between two elements. */ double minDx; /** * Set up scenario from the command line. * * This operation is not to be changed in the assignment. */ void setUp(int argc, char **argv) { NumberOfBodies = (argc - 2) / 6; x = new double *[NumberOfBodies]; v = new double *[NumberOfBodies]; int readArgument = 1; tPlotDelta = std::stof(argv[readArgument]); readArgument++; tFinal = std::stof(argv[readArgument]); readArgument++; for (int i = 0; i < NumberOfBodies; i++) { x[i] = new double[3]; v[i] = new double[3]; x[i][0] = std::stof(argv[readArgument]); readArgument++; x[i][1] = std::stof(argv[readArgument]); readArgument++; x[i][2] = std::stof(argv[readArgument]); readArgument++; v[i][0] = std::stof(argv[readArgument]); readArgument++; v[i][1] = std::stof(argv[readArgument]); readArgument++; v[i][2] = std::stof(argv[readArgument]); readArgument++; } std::cout << "created setup with " << NumberOfBodies << " bodies" << std::endl; if (tPlotDelta <= 0.0) { std::cout << "plotting switched off" << std::endl; tPlot = tFinal + 1.0; } else { std::cout << "plot initial setup plus every " << tPlotDelta << " time units" << std::endl; tPlot = 0.0; } } std::ofstream videoFile; /** * This operation is not to be changed in the assignment. */ void openParaviewVideoFile() { videoFile.open("result.pvd"); videoFile << "<?xml version=\"1.0\"?>" << std::endl << "<VTKFile type=\"Collection\" version=\"0.1\" byte_order=\"LittleEndian\" compressor=\"vtkZLibDataCompressor\">" << std::endl << "<Collection>"; } /** * This operation is not to be changed in the assignment. */ void closeParaviewVideoFile() { videoFile << "</Collection>" << "</VTKFile>" << std::endl; } /** * The file format is documented at http://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf * * This operation is not to be changed in the assignment. */ void printParaviewSnapshot() { static int counter = -1; counter++; std::stringstream filename; filename << "result-" << counter << ".vtp"; std::ofstream out(filename.str().c_str()); out << "<VTKFile type=\"PolyData\" >" << std::endl << "<PolyData>" << std::endl << " <Piece NumberOfPoints=\"" << NumberOfBodies << "\">" << std::endl << " <Points>" << std::endl << " <DataArray type=\"Float32\" NumberOfComponents=\"3\" format=\"ascii\">"; for (int i = 0; i < NumberOfBodies; i++) { out << x[i][0] << " " << x[i][1] << " " << x[i][2] << " "; } out << " </DataArray>" << std::endl << " </Points>" << std::endl << " </Piece>" << std::endl << "</PolyData>" << std::endl << "</VTKFile>" << std::endl; videoFile << "<DataSet timestep=\"" << counter << "\" group=\"\" part=\"0\" file=\"" << filename.str() << "\"/>" << std::endl; } /** * This is the operation you should primarily change in the assignment. Please * keep to the method topology, i.e. do not subdivide further. Also try to have * as many modifications as possible in this part - some initialisation/clean up * likely goes somewhere else, but the main stuff should be done here. * * See array documentations, too. */ void updateBody() { maxV = 0.0; minDx = std::numeric_limits<double>::max(); const double sigma = 3.4e-10; const double epsilon = 1.64e-21; const double mass = 39.948; double *force0 = new double[NumberOfBodies]; double *force1 = new double[NumberOfBodies]; double *force2 = new double[NumberOfBodies]; #pragma omp parallel { // double tempDx = 0; #pragma omp for for (int i = 0; i < NumberOfBodies; i++) { force0[i] = 0.0; force1[i] = 0.0; force2[i] = 0.0; #pragma omp parallel for reduction(min: minDx) for (int j = 0; j < NumberOfBodies; j++) { if (i != j) { const double distance = sqrt( (x[j][0] - x[i][0]) * (x[j][0] - x[i][0]) + (x[j][1] - x[i][1]) * (x[j][1] - x[i][1]) + (x[j][2] - x[i][2]) * (x[j][2] - x[i][2]) ); minDx = std::min(minDx, distance); double quantity = 4.0 * epsilon * (-12.0 * std::pow(sigma, 12.0) / std::pow(distance, 12.0) + 6.0 * std::pow(sigma, 6.0) / std::pow(distance, 6.0)) / distance; force0[i] += (x[j][0] - x[i][0]) * quantity / distance; force1[i] += (x[j][1] - x[i][1]) * quantity / distance; force2[i] += (x[j][2] - x[i][2]) * quantity / distance; } } } // #pragma omp critical // { // minDx = std::min(minDx, tempDx); // } } #pragma omp parallel for for (int i = 0; i < NumberOfBodies; i++) { x[i][0] = x[i][0] + timeStepSize * v[i][0]; x[i][1] = x[i][1] + timeStepSize * v[i][1]; x[i][2] = x[i][2] + timeStepSize * v[i][2]; } // #pragma omp parallel // { // double tempV = 0; #pragma omp parallel for reduction(max: maxV) for (int i = 0; i < NumberOfBodies; i++) { v[i][0] = v[i][0] + timeStepSize * force0[i] / mass; v[i][1] = v[i][1] + timeStepSize * force1[i] / mass; v[i][2] = v[i][2] + timeStepSize * force2[i] / mass; double thisV = std::sqrt(v[i][0] * v[i][0] + v[i][1] * v[i][1] + v[i][2] * v[i][2]); maxV = std::max(maxV, thisV); } // #pragma omp critical // { // maxV = std::max(tempV, maxV); // } // } delete[] force0; delete[] force1; delete[] force2; t += timeStepSize; } /** * Main routine. * * Not to be changed in assignment. */ int main(int argc, char **argv) { if (argc == 1) { std::cerr << "usage: " + std::string(argv[0]) + " snapshot final-time objects" << std::endl << " snapshot interval after how many time units to plot. Use 0 to switch off plotting" << std::endl << " final-time simulated time (greater 0)" << std::endl << std::endl << "Examples:" << std::endl << "10.0 10000.0 0 0 0 0 0 0 2e-9 0 0 0 0 0 0.9e-9 1e-9 0 0 0 0 \t Three body setup" << std::endl << std::endl; return -1; } else if ((argc - 3) % 6 != 0) { std::cerr << "error in arguments: each planet is given by six entries (position, velocity)" << std::endl; return -2; } setUp(argc, argv); openParaviewVideoFile(); int snapshotCounter = 0; if (t > tPlot) { printParaviewSnapshot(); std::cout << "plotted initial setup" << std::endl; tPlot = tPlotDelta; } int timeStepCounter = 0; while (t <= tFinal) { updateBody(); timeStepCounter++; if (t >= tPlot) { printParaviewSnapshot(); std::cout << "plot next snapshot" << ",\t time step=" << timeStepCounter << ",\t t=" << t << ",\t dt=" << timeStepSize << ",\t v_max=" << maxV << ",\t dx_min=" << minDx << std::endl; tPlot += tPlotDelta; } } closeParaviewVideoFile(); return 0; }
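One detail of updateBody above is worth flagging: the reduction(min: minDx) directive sits on the inner j loop, inside the team already executing the outer omp for, so it opens a nested parallel region; with nesting disabled (the usual default) the inner directive adds no parallelism, and the region-end write-backs to the shared minDx from different outer threads can race. A flat alternative (a sketch using the same globals, not the assignment's prescribed solution) declares the reduction once on the outer loop:

// Sketch: single parallel region, reduction hoisted to the outer loop.
#pragma omp parallel for reduction(min: minDx)
for (int i = 0; i < NumberOfBodies; i++) {
  force0[i] = 0.0; force1[i] = 0.0; force2[i] = 0.0;
  for (int j = 0; j < NumberOfBodies; j++) {   // inner loop stays serial per thread
    if (i != j) {
      const double distance = sqrt(
        (x[j][0]-x[i][0])*(x[j][0]-x[i][0]) +
        (x[j][1]-x[i][1])*(x[j][1]-x[i][1]) +
        (x[j][2]-x[i][2])*(x[j][2]-x[i][2]));
      minDx = std::min(minDx, distance);
      const double quantity = 4.0 * epsilon *
        (-12.0 * std::pow(sigma, 12.0) / std::pow(distance, 12.0)
          + 6.0 * std::pow(sigma, 6.0) / std::pow(distance, 6.0)) / distance;
      force0[i] += (x[j][0]-x[i][0]) * quantity / distance;
      force1[i] += (x[j][1]-x[i][1]) * quantity / distance;
      force2[i] += (x[j][2]-x[i][2]) * quantity / distance;
    }
  }
}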
util_test.h
/******************************************************************************* * Copyright 2018 Tensor Tang. All Rights Reserved * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ /** * This file defines some utilities that used in gtest */ #pragma once #include <cmath> #include "gtest/gtest.h" #include "jitinfer.h" #include "omp_thread.h" #include "util.h" #include "util_jitinfer.h" namespace jitinfer { namespace util { template <typename data_t> static inline data_t set_value(size_t index) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { double mean = 1., deviation = 1e-2; return static_cast<data_t>(mean + deviation * sinf(float(index % 37))); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::s32)) { return data_t(rand() % 21 - 10); } else if (type2dtype<data_t>::dtype == data_type::u8) { return data_t(rand() % 17); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i); } } template <typename data_t> static inline data_t set_value(size_t index, data_t mmin, data_t mmax) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { return static_cast<data_t>(mmin + (mmax - mmin) * (float)(rand() % 100) / 100.f); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::u8, data_type::s32)) { return data_t(mmin + rand() % (s32)(mmax - mmin)); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz, T a, T b) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i, std::min(a, b), std::max(a, b)); } } template <typename T> void compare_array(T* dst, T* ref, size_t sz) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; ++i) { if (std::is_same<T, f32>::value) { f32 diff = dst[i] - ref[i]; f32 e = (std::fabs(ref[i]) > (f32)1e-4) ? diff / ref[i] : diff; EXPECT_NEAR(e, 0.f, (f32)1e-4) << "Index: " << i << " Total: " << sz; } else { EXPECT_EQ(dst[i], ref[i]) << "Index: " << i << " Total: " << sz; } } } } }
/** * This file defines some utilities that used in gtest */ #pragma once #include <cmath> #include "gtest/gtest.h" #include "jitinfer.h" #include "omp_thread.h" #include "util.h" #include "util_jitinfer.h" namespace jitinfer { namespace util { template <typename data_t> static inline data_t set_value(size_t index) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { double mean = 1., deviation = 1e-2; return static_cast<data_t>(mean + deviation * sinf(float(index % 37))); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::s32)) { return data_t(rand() % 21 - 10); } else if (type2dtype<data_t>::dtype == data_type::u8) { return data_t(rand() % 17); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz) { for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i); } } template <typename data_t> static inline data_t set_value(size_t index, data_t mmin, data_t mmax) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { return static_cast<data_t>(mmin + (mmax - mmin) * (float)(rand() % 100) / 100.f); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::u8, data_type::s32)) { return data_t(mmin + rand() % (s32)(mmax - mmin)); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz, T a, T b) { for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i, std::min(a, b), std::max(a, b)); } } template <typename T> void compare_array(T* dst, T* ref, size_t sz) { for (size_t i = 0; i < sz; ++i) { if (std::is_same<T, f32>::value) { f32 diff = dst[i] - ref[i]; f32 e = (std::fabs(ref[i]) > (f32)1e-4) ? diff / ref[i] : diff; EXPECT_NEAR(e, 0.f, (f32)1e-4) << "Index: " << i << " Total: " << sz; } else { EXPECT_EQ(dst[i], ref[i]) << "Index: " << i << " Total: " << sz; } } } } }
/** * This file defines some utilities that used in gtest */ #pragma once #include <cmath> #include "gtest/gtest.h" #include "jitinfer.h" #include "omp_thread.h" #include "util.h" #include "util_jitinfer.h" namespace jitinfer { namespace util { template <typename data_t> static inline data_t set_value(size_t index) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { double mean = 1., deviation = 1e-2; return static_cast<data_t>(mean + deviation * sinf(float(index % 37))); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::s32)) { return data_t(rand() % 21 - 10); } else if (type2dtype<data_t>::dtype == data_type::u8) { return data_t(rand() % 17); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i); } } template <typename data_t> static inline data_t set_value(size_t index, data_t mmin, data_t mmax) { using data_type = jitinfer::memory::dtype; if (type2dtype<data_t>::dtype == data_type::f32) { return static_cast<data_t>(mmin + (mmax - mmin) * (float)(rand() % 100) / 100.f); } else if (one_of(type2dtype<data_t>::dtype, data_type::s8, data_type::u8, data_type::s32)) { return data_t(mmin + rand() % (s32)(mmax - mmin)); } else { return data_t(0); } } template <typename T> void fill_data(T* p, size_t sz, T a, T b) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; i++) { p[i] = set_value<T>(i, std::min(a, b), std::max(a, b)); } } template <typename T> void compare_array(T* dst, T* ref, size_t sz) { #pragma omp parallel for schedule(static) for (size_t i = 0; i < sz; ++i) { if (std::is_same<T, f32>::value) { f32 diff = dst[i] - ref[i]; f32 e = (std::fabs(ref[i]) > (f32)1e-4) ? diff / ref[i] : diff; EXPECT_NEAR(e, 0.f, (f32)1e-4) << "Index: " << i << " Total: " << sz; } else { EXPECT_EQ(dst[i], ref[i]) << "Index: " << i << " Total: " << sz; } } } } }
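Two things about these helpers are easy to miss: rand() is called from inside omp parallel for loops even though, portably, it is not guaranteed to be thread-safe (acceptable for test fixtures, but implementation-dependent), and compare_array mixes error metrics, using relative error when |ref[i]| > 1e-4 and absolute error otherwise. A small standalone sketch of that tolerance rule (hypothetical helper and values, outside gtest):

#include <cmath>
#include <cstdio>

// Hypothetical standalone version of the compare_array tolerance rule:
// relative error for refs above eps, absolute error for tiny refs.
static bool nearly_equal(float dst, float ref, float eps = 1e-4f) {
  float diff = dst - ref;
  float e = (std::fabs(ref) > eps) ? diff / ref : diff;
  return std::fabs(e) <= eps;
}

int main() {
  std::printf("%d\n", nearly_equal(1000.05f, 1000.0f));  // 1: relative error ~5e-5
  std::printf("%d\n", nearly_equal(1e-6f, 2e-6f));       // 1: absolute error 1e-6
  std::printf("%d\n", nearly_equal(1.01f, 1.0f));        // 0: relative error ~1e-2
  return 0;
}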
threading_utils.h
/*! * Copyright 2015-2019 by Contributors * \file common.h * \brief Threading utilities */ #ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template <typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException exc; #pragma omp parallel num_threads(nthreads) { exc.Run([&]() { size_t 
tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, size_t nthreads, Func fn) { dmlc::OMPException exc; #pragma omp parallel for num_threads(nthreads) schedule(static) for (Index i = 0; i < size; ++i) { exc.Run(fn, i); } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, Func fn) { ParallelFor(size, omp_get_max_threads(), fn); } /* \brief Configure parallel threads. * * \param p_threads Number of threads, when it's less than or equal to 0, this function * will change it to number of process on system. * * \return Global openmp max threads before configuration. */ inline int32_t OmpSetNumThreads(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = omp_get_num_procs(); } omp_set_num_threads(threads); return nthread_original; } inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = nthread_original; } omp_set_num_threads(threads); return nthread_original; } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template <typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException exc; exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + !!(num_blocks_in_space % nthreads); size_t begin = 
chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, size_t nthreads, Func fn) { dmlc::OMPException exc; for (Index i = 0; i < size; ++i) { exc.Run(fn, i); } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, Func fn) { ParallelFor(size, omp_get_max_threads(), fn); } /* \brief Configure parallel threads. * * \param p_threads Number of threads, when it's less than or equal to 0, this function * will change it to number of process on system. * * \return Global openmp max threads before configuration. */ inline int32_t OmpSetNumThreads(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = omp_get_num_procs(); } omp_set_num_threads(threads); return nthread_original; } inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = nthread_original; } omp_set_num_threads(threads); return nthread_original; } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
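One caveat worth noting in this pragma-free rendering: omp_get_thread_num() is still called inside ParallelFor2d, and outside a parallel region it returns 0, so for nthreads > 1 only the first chunk of blocks is visited. A faithful serial equivalent would simply walk every block; a sketch assuming the BlockedSpace2d interface above (SerialFor2d is a hypothetical name):

// Visit every block of the space in order, on the calling thread.
template <typename Func>
void SerialFor2d(const BlockedSpace2d& space, Func func) {
  const std::size_t n = space.Size();
  for (std::size_t i = 0; i < n; ++i) {
    func(space.GetFirstDimension(i), space.GetRange(i));
  }
}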
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_ #define XGBOOST_COMMON_THREADING_UTILS_H_ #include <dmlc/common.h> #include <vector> #include <algorithm> #include "xgboost/logging.h" namespace xgboost { namespace common { // Represent simple range of indexes [begin, end) // Inspired by tbb::blocked_range class Range1d { public: Range1d(size_t begin, size_t end): begin_(begin), end_(end) { CHECK_LT(begin, end); } size_t begin() const { // NOLINT return begin_; } size_t end() const { // NOLINT return end_; } private: size_t begin_; size_t end_; }; // Split 2d space to balanced blocks // Implementation of the class is inspired by tbb::blocked_range2d // However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example: // [ 1,2,3 ] // [ 4,5,6 ] // [ 7,8,9 ] // But the class is able to work with different sizes in each 'row'. Example: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // If grain_size is 2: It produces following blocks: // [1,2], [3,4], [5,6], [7,8], [9] // The class helps to process data in several tree nodes (non-balanced usually) in parallel // Using nested parallelism (by nodes and by data in each node) // it helps to improve CPU resources utilization class BlockedSpace2d { public: // Example of space: // [ 1,2 ] // [ 3,4,5,6 ] // [ 7,8,9] // BlockedSpace2d will create following blocks (tasks) if grain_size=2: // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values) // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values) // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values) // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values) // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values) // Arguments: // dim1 - size of the first dimension in the space // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index // grain_size - max size of produced blocks template<typename Func> BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) { for (size_t i = 0; i < dim1; ++i) { const size_t size = getter_size_dim2(i); const size_t n_blocks = size/grain_size + !!(size % grain_size); for (size_t iblock = 0; iblock < n_blocks; ++iblock) { const size_t begin = iblock * grain_size; const size_t end = std::min(begin + grain_size, size); AddBlock(i, begin, end); } } } // Amount of blocks(tasks) in a space size_t Size() const { return ranges_.size(); } // get index of the first dimension of i-th block(task) size_t GetFirstDimension(size_t i) const { CHECK_LT(i, first_dimension_.size()); return first_dimension_[i]; } // get a range of indexes for the second dimension of i-th block(task) Range1d GetRange(size_t i) const { CHECK_LT(i, ranges_.size()); return ranges_[i]; } private: void AddBlock(size_t first_dimension, size_t begin, size_t end) { first_dimension_.push_back(first_dimension); ranges_.emplace_back(begin, end); } std::vector<Range1d> ranges_; std::vector<size_t> first_dimension_; }; // Wrapper to implement nested parallelism with simple omp parallel for template <typename Func> void ParallelFor2d(const BlockedSpace2d& space, int nthreads, Func func) { const size_t num_blocks_in_space = space.Size(); nthreads = std::min(nthreads, omp_get_max_threads()); nthreads = std::max(nthreads, 1); dmlc::OMPException exc; #pragma omp parallel num_threads(nthreads) { exc.Run([&]() { size_t tid = omp_get_thread_num(); size_t chunck_size = num_blocks_in_space / nthreads + 
!!(num_blocks_in_space % nthreads); size_t begin = chunck_size * tid; size_t end = std::min(begin + chunck_size, num_blocks_in_space); for (auto i = begin; i < end; i++) { func(space.GetFirstDimension(i), space.GetRange(i)); } }); } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, size_t nthreads, Func fn) { dmlc::OMPException exc; #pragma omp parallel for num_threads(nthreads) schedule(static) for (Index i = 0; i < size; ++i) { exc.Run(fn, i); } exc.Rethrow(); } template <typename Index, typename Func> void ParallelFor(Index size, Func fn) { ParallelFor(size, omp_get_max_threads(), fn); } /* \brief Configure parallel threads. * * \param p_threads Number of threads, when it's less than or equal to 0, this function * will change it to number of process on system. * * \return Global openmp max threads before configuration. */ inline int32_t OmpSetNumThreads(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = omp_get_num_procs(); } omp_set_num_threads(threads); return nthread_original; } inline int32_t OmpSetNumThreadsWithoutHT(int32_t* p_threads) { auto& threads = *p_threads; int32_t nthread_original = omp_get_max_threads(); if (threads <= 0) { threads = nthread_original; } omp_set_num_threads(threads); return nthread_original; } } // namespace common } // namespace xgboost #endif // XGBOOST_COMMON_THREADING_UTILS_H_
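For reference, a hypothetical caller of the interface above, using the jagged space from the header's own comment (rows of length 2, 4 and 3 with grain_size = 2, which yields five blocks):

#include <cstddef>
#include <vector>

void DoubleAll(std::vector<std::vector<int>>* rows) {
  using xgboost::common::BlockedSpace2d;
  using xgboost::common::Range1d;
  // Blocks produced: [1,2], [3,4], [5,6], [7,8], [9].
  BlockedSpace2d space(
      rows->size(),
      [&](std::size_t i) { return (*rows)[i].size(); },  // dim2 size per row
      2);                                                // grain_size
  xgboost::common::ParallelFor2d(space, 4, [&](std::size_t row, Range1d r) {
    for (std::size_t j = r.begin(); j < r.end(); ++j) {
      (*rows)[row][j] *= 2;  // the per-block work
    }
  });
}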
cloud.c
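Every multi-word operation in cloud.c below reduces to the gate-level full adder in add(): per bit, sum = x XOR y XOR c, and the next carry is c XOR ((x XOR c) AND (y XOR c)), which equals the majority of x, y and c. main() then chains 32-bit words through the returned carry, and subtraction reuses the same adder as a + (NOT b + 1). A plaintext sketch of the identical recurrence, with ordinary bits standing in for encrypted LweSamples (AddBits is an illustrative name, not in the file):

#include <cstdint>
#include <cstdio>

// Plain-bit model of cloud.c's add(): the same XOR/AND recurrence that the
// bootsXOR/bootsAND gates evaluate, with ints in place of LweSample bits.
static uint32_t AddBits(uint32_t x, uint32_t y, int carry_in, int* carry_out) {
  int carry = carry_in;
  uint32_t sum = 0;
  for (int i = 0; i < 32; ++i) {
    int xi = (x >> i) & 1, yi = (y >> i) & 1;
    int axc = xi ^ carry;              // bootsXOR(axc, x + i, carry)
    int bxc = yi ^ carry;              // bootsXOR(bxc, y + i, carry)
    sum |= (uint32_t)(xi ^ bxc) << i;  // bootsXOR(sum + i, x + i, bxc)
    carry = carry ^ (axc & bxc);       // bootsAND(axc, axc, bxc); bootsXOR(carry, carry, axc)
  }
  *carry_out = carry;
  return sum;
}

int main() {
  int c;
  std::printf("%u (carry %d)\n", AddBits(7u, 5u, 0, &c), c);  // prints 12 (carry 0)
}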
#include <string> #include <iostream> #include <algorithm> #include <utility> #include <tfhe/tfhe.h> #include <tfhe/tfhe_io.h> #include <stdio.h> #include <time.h> #include <vector> #include <cassert> #include <sys/time.h> #include <omp.h> #include <fstream> using namespace std; ifstream read; #define T_FILE "averagestandard.txt" void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *carry = new_LweSample_array(1, in_out_params); LweSample *axc = new_LweSample_array(1, in_out_params); LweSample *bxc = new_LweSample_array(1, in_out_params); bootsCOPY(carry, c, keyset); for(int32_t i = 0; i < nb_bits; i++) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(axc, x + i, carry, keyset); #pragma omp section bootsXOR(bxc, y + i, carry, keyset); } #pragma omp parallel sections num_threads(2) { #pragma omp section bootsXOR(sum + i, x + i, bxc, keyset); #pragma omp section bootsAND(axc, axc, bxc, keyset); } bootsXOR(carry, carry, axc, keyset); } bootsCOPY(carryover, carry, keyset); delete_LweSample_array(1, carry); delete_LweSample_array(1, axc); delete_LweSample_array(1, bxc); } void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsCONSTANT(result + i, 0, keyset);} } void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) { for(int i = 0; i < size; i++){ bootsNOT(result + i, x + i, keyset);} } void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d,LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; LweSample *sum = new_LweSample_array(32, in_out_params); LweSample *sum2 = new_LweSample_array(32, in_out_params); LweSample *sum3 = new_LweSample_array(32, in_out_params); LweSample *carryover = new_LweSample_array(32, in_out_params); LweSample *carryover2 = new_LweSample_array(32, in_out_params); LweSample *carryover3 = new_LweSample_array(32, in_out_params); for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum + i, 0, keyset); bootsCONSTANT(sum2 + i, 0, keyset); bootsCONSTANT(sum3 + i, 0, keyset); bootsCONSTANT(carryover + i, 0, keyset); bootsCONSTANT(carryover2 + i, 0, keyset); bootsCONSTANT(carryover3 + i, 0, keyset); } //adding the 2nd result with the carry add(sum, carryover, e, b, carry, nb_bits, keyset); add(sum2, carryover2, d, a, carryover, nb_bits, keyset); add(sum3, carryover3, c, carryover2,carry,nb_bits, keyset); for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult + i, sum3 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult2 + i, sum2 + i, keyset); } for (int32_t i = 0; i < nb_bits; ++i) { bootsCOPY(finalresult3 + i, sum + i, keyset); } delete_LweSample_array(32, sum); delete_LweSample_array(32, sum2); delete_LweSample_array(32, sum3); delete_LweSample_array(32, carryover); delete_LweSample_array(32, carryover2); delete_LweSample_array(32, carryover3); } void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of 
the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); } //multiply all the bits together with the other bits.. int round = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsAND(tmp + k, a + k, b + i, keyset); } } if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside for (int32_t i = 0; i < 32 - round; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //the rest of the bits that couldn't fit inside for (int32_t i = 0; i < round; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c2 + i, keyset); bootsCOPY(result2 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); } void mul64(LweSample *result, LweSample *result2,LweSample *result3, LweSample *a, LweSample *b,LweSample *c,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset);
bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); } //multiply all the bits together with the other bits.. int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsAND(tmp + k, a + k, c + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, c + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } } //some of tmp2 to remaining of tmp3c2 //repeats 31 times for (int32_t i = 0; i < counter1; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } } //the rest of tmp2 to tmp3c3 //repeats 1 time for (int32_t i = 0; i < counter2; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c3 + i, keyset); bootsCOPY(result2 + i, sum3c2 + i, keyset); bootsCOPY(result3 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2); delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); } void mul128(LweSample *result, LweSample *result2,LweSample *result3,LweSample *result4,LweSample *result5, LweSample *a, LweSample *b,LweSample *c,LweSample *d, LweSample *e,const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) { const LweParams *in_out_params = keyset->params->in_out_params; //sum of the output LweSample *sum3c1 = new_LweSample_array(32, in_out_params); LweSample *sum3c2 = new_LweSample_array(32, in_out_params); LweSample *sum3c3 =
new_LweSample_array(32, in_out_params); LweSample *sum3c4 = new_LweSample_array(32, in_out_params); LweSample *sum3c5 = new_LweSample_array(32, in_out_params); LweSample *tmp = new_LweSample_array(32, in_out_params); LweSample *tmp2 = new_LweSample_array(32, in_out_params); LweSample *tmp3 = new_LweSample_array(32, in_out_params); LweSample *tmp4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c1 = new_LweSample_array(32, in_out_params); LweSample *tmp3c2 = new_LweSample_array(32, in_out_params); LweSample *tmp3c3 = new_LweSample_array(32, in_out_params); LweSample *tmp3c4 = new_LweSample_array(32, in_out_params); LweSample *tmp3c5 = new_LweSample_array(32, in_out_params); LweSample *carry1 = new_LweSample_array(32, in_out_params); LweSample *carry2 = new_LweSample_array(32, in_out_params); LweSample *carry3 = new_LweSample_array(32, in_out_params); LweSample *carry4 = new_LweSample_array(32, in_out_params); LweSample *carry5 = new_LweSample_array(32, in_out_params); //set all these to 0 for (int32_t i = 0; i < nb_bits; ++i) { bootsCONSTANT(sum3c1 + i, 0, keyset); bootsCONSTANT(sum3c2 + i, 0, keyset); bootsCONSTANT(sum3c3 + i, 0, keyset); bootsCONSTANT(sum3c4 + i, 0, keyset); bootsCONSTANT(sum3c5 + i, 0, keyset); bootsCONSTANT(tmp + i, 0, keyset); bootsCONSTANT(tmp2 + i, 0, keyset); bootsCONSTANT(tmp3 + i, 0, keyset); bootsCONSTANT(tmp4 + i, 0, keyset); bootsCONSTANT(tmp3c1 + i, 0, keyset); bootsCONSTANT(tmp3c2 + i, 0, keyset); bootsCONSTANT(tmp3c3 + i, 0, keyset); bootsCONSTANT(tmp3c4 + i, 0, keyset); bootsCONSTANT(tmp3c5 + i, 0, keyset); bootsCONSTANT(carry1 + i, 0, keyset); bootsCONSTANT(carry2 + i, 0, keyset); bootsCONSTANT(carry3 + i, 0, keyset); bootsCONSTANT(carry4 + i, 0, keyset); bootsCONSTANT(carry5 + i, 0, keyset); } //multiply all the bits together with the other bits.. 
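// How the loop below works: word-sliced shift-and-add multiplication. Round r
// ANDs each multiplicand word (a, b, c, d) with one bit of the multiplier e to
// form four partial-product words (tmp..tmp4), then shifts the whole partial
// product left by r bits by splitting every word at counter1 = 32 - r: the low
// counter1 bits land at offset r of one destination word (tmp3c1, tmp3c2, ...),
// and the high counter2 = r bits spill into the start of the next word. The
// shifted product is accumulated into sum3c1..sum3c5 with the carries chained
// through add(), exactly as in mul32 and mul64 above.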
int round = 0; int counter1 = 0; int counter2 = 0; for (int32_t i = 0; i < nb_bits; ++i) { for (int32_t k = 0; k < nb_bits; ++k) { //this is basically multiplying as it is an AND gate //a(ciphertext1) should be the least significant bit #pragma omp parallel sections num_threads(4) { #pragma omp section bootsAND(tmp + k, a + k, e + i, keyset); #pragma omp section bootsAND(tmp2 + k, b + k, e + i, keyset); #pragma omp section bootsAND(tmp3 + k, c + k, e + i, keyset); #pragma omp section bootsAND(tmp4 + k, d + k, e + i, keyset); } } counter1 = 32 - round; counter2 = 32 - counter1; if (round > 0) { for (int32_t i = 0; i < round; ++i) { //putting number of 0s in front bootsCONSTANT(tmp3c1 + i, 0, keyset); } } //copy all the bits that fit into an int32 with the 0s inside //tmp to tmp3c1 for (int32_t i = 0; i < counter1; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c1 + i + round , tmp + i, keyset); } } //remaining of tmp to tmp3c2 for (int32_t i = 0; i < counter2; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset); } } //some of tmp2 to remaining of tmp3c2 for (int32_t i = 0; i < counter1; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset); } } //remaining tmp2 to tmp3c3 for (int32_t i = 0; i < counter2; ++i) { // +round because the 0s are in front //tmp is the least significant bit #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset); } } //some of tmp3 to remaining tmp3c3 for (int32_t i = 0; i < counter1; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset); } } //rest of tmp3 to tmp3c4 for (int32_t i = 0; i < counter2; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset); } } //some of tmp4 to remaining tmp3c4 for (int32_t i = 0; i < counter1; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset); } } //rest of tmp4 to tmp3c5 for (int32_t i = 0; i < counter2; ++i) { #pragma omp parallel sections num_threads(2) { #pragma omp section bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset); } } add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset); add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset); add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset); add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset); add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset); round++; } for (int32_t i = 0; i < 32; ++i) { bootsCOPY(result + i, sum3c5 + i, keyset); bootsCOPY(result2 + i, sum3c4 + i, keyset); bootsCOPY(result3 + i, sum3c3 + i, keyset); bootsCOPY(result4 + i, sum3c2 + i, keyset); bootsCOPY(result5 + i, sum3c1 + i, keyset); } delete_LweSample_array(32, sum3c1); delete_LweSample_array(32, sum3c2); delete_LweSample_array(32, sum3c3); delete_LweSample_array(32, sum3c4); delete_LweSample_array(32, sum3c5); delete_LweSample_array(32, tmp); delete_LweSample_array(32, tmp2); delete_LweSample_array(32, tmp3); delete_LweSample_array(32, tmp4); delete_LweSample_array(32, tmp3c1); delete_LweSample_array(32, tmp3c2);
delete_LweSample_array(32, tmp3c3); delete_LweSample_array(32, tmp3c4); delete_LweSample_array(32, tmp3c5); delete_LweSample_array(32, carry1); delete_LweSample_array(32, carry2); delete_LweSample_array(32, carry3); delete_LweSample_array(32, carry4); delete_LweSample_array(32, carry5); } int main() { // sidh_cipher_cloud should have already appended 2 cipherstreams into cloud.data printf("Reading the key...\n"); // reads the cloud key from file FILE* cloud_key = fopen("cloud.key", "rb"); TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key); fclose(cloud_key); // reads the nbit key from file FILE* nbit_key = fopen("nbit.key","rb"); TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key); fclose(nbit_key); // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* params = bk->params; // if necessary, the params are inside the key const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params; // Create ciphertext blocks for negative1, bit1, negative2, bit2 and values LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); printf("Reading input 1...\n"); // reads ciphertexts from cloud.data FILE* cloud_data = fopen("cloud.data", "rb"); for (int i = 0; i<32; i++) // line0 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams); for (int i = 0; i<32; i++) // line1 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams); // Decrypts bit size1 int32_t int_bit1 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextbit1[i],nbitkey)>0; int_bit1 |= (ai<<i); } for (int i=0; i<32; i++) 
import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params); for (int i = 0; i<32; i++) // line10 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params); printf("Reading input 2...\n"); for (int i = 0; i<32; i++) // line11 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams); for (int i = 0; i<32; i++) // line12 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams); // Decrypts bit size2 int32_t int_bit2 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextbit2[i],nbitkey)>0; int_bit2 |= (ai<<i); } for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params); for (int i=0; i<32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params); for (int i = 0; i<32; i++) // line21 import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params); printf("Reading operation code...\n"); // Get Operation Code from File int32_t int_op; read.open("operator.txt"); read >> int_op; // Homomorphic encryption to add negative1 and negative2 ciphertexts LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); // add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE // Decrypts Negative1 int32_t int_negative1 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextnegative1[i],nbitkey)>0; int_negative1 |= (ai<<i); } std::cout << int_negative1 << " => negative1" << "\n"; // convert first value negativity code from 2 to 1 if (int_negative1 == 2){ int_negative1 = 1;} // Decrypts Negative2 int32_t int_negative2 = 0; for (int i=0; i<32; i++) { int ai = bootsSymDecrypt(&ciphertextnegative2[i],nbitkey)>0; int_negative2 |= (ai<<i); } std::cout << int_negative2 << " => negative2" << "\n"; // Add Negatives. 
// If both v1 & v2 are positive, int_negative = 0 // If only v1 is negative, int_negative = 1 // If only v2 is negative, int_negative = 2 // If both v1 & v2 are negative, int_negative = 3 int32_t int_negative; int_negative = (int_negative1 + int_negative2); // std::cout << int_negative << " -> negatives" << "\n"; //export the negative and bit data for the verif FILE* answer_data = fopen("answer.data", "wb"); // Write negative to answer.data int32_t ciphernegative = 0; if (int_negative == 1){ ciphernegative = 1; } if (int_negative == 2){ ciphernegative = 2; } if (int_negative == 3){ ciphernegative = 4; } for (int i=0; i<32; i++) { bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative>>i)&1, nbitkey); } for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams); std::cout << ciphernegative << " => total negatives" << "\n"; delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative); // Compare bit sizes int32_t int_bit = 0; if (int_op == 4){ if (int_bit1 >= int_bit2){int_bit = (int_bit1 * 2);} else{int_bit = (int_bit2 * 2);} for (int i=0; i<32; i++) { bootsSymEncrypt(&ciphertextbit[i], (int_bit>>i)&1, nbitkey);} for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; if (int_bit1 >= int_bit2){int_bit = int_bit1;} else{int_bit = int_bit2;} } else if (int_bit1 >= int_bit2) { int_bit = int_bit1; for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; } else{ int_bit = int_bit2; for (int i = 0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams); std::cout << int_bit << " written to answer.data" << "\n"; } fclose(cloud_data); // If trying to multiply a 256 bit number if ((int_op == 4) && (int_bit >= 256)){ std::cout << "Cannot multiply 256 bit number!" 
<< "\n"; fclose(answer_data); return 126; } // Addition //if (the operation is add AND (both numbers are positive OR both numbers are negative)) OR (the operation is subtract AND either number is negative) // A+B, [(-A)+(-B)], A-(-B), (-A)-(B) if ((int_op == 1 && (int_negative != 1 && int_negative != 2 )) || (int_op == 2 && (int_negative == 1 || int_negative == 2))) { if (int_op == 1){ std::cout << int_bit << " bit Addition computation" << "\n"; }else{ std::cout << int_bit << " bit Subtraction computation" << "\n"; } //32 Bit Addition if (int_bit == 32) { // Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); // Timings gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); printf("writing the answer to file...\n"); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //64 Bit Addition if (int_bit == 64) { //Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, 
ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //128 Bit Addition if (int_bit == 128) { //Ciphertext to hold the result and carry LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); printf("Doing the homomorphic computation...\n"); //Adding component add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk); add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk); // Timing gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; 
printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //256 Bit Addition if (int_bit == 256) { // do some operations on the ciphertexts: here, we will compute the // addition of the two LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); 
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); // Timing struct timeval start, end; double get_time; gettimeofday(&start, NULL); add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk); add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk); add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk); add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk); add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk); add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk); add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the 64 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, carry5); delete_gate_bootstrapping_ciphertext_array(32, carry6); delete_gate_bootstrapping_ciphertext_array(32, carry7); delete_gate_bootstrapping_ciphertext_array(32, carry8); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); 
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertext13); delete_gate_bootstrapping_ciphertext_array(32, ciphertext14); delete_gate_bootstrapping_ciphertext_array(32, ciphertext15); delete_gate_bootstrapping_ciphertext_array(32, ciphertext16); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } } // Subtraction // If the operation is subtract OR (the operation is addition AND either one of the values are negative) A-B, A+(-B), (-A)+B else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))){ // Normal Subtraction computation with no negative numbers A-B OR Addition with 2nd number negative A+(-B) if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)){ if (int_op == 2){ std::cout << int_bit << " bit Subtraction computation" << "\n"; }else { std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n"; } //32 Bit Subtraction if(int_bit == 32) { printf("Doing the homomorphic computation...\n"); LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
Invert the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); //initialize temp and the tempcarry arrays to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); //Assign temp to have a value of 1 for two's complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inverted value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) //result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //64 Bit Subtraction if(int_bit == 64) { LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample*
twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; printf("Doing the homomorphic computation...\n"); gettimeofday(&start, NULL); //Subtraction Process //Step 1. Invert the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); NOT(inverse2, ciphertext10, bk, 32); //initialize temp and the tempcarry arrays to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); //Assign temp to have a value of 1 for two's complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inverted value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32,
ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //128 Bit Subtraction if(int_bit == 128) { // reads the 2x32 ciphertexts from the cloud file printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
    //128 Bit Subtraction
    if (int_bit == 128) {
        printf("Doing the homomorphic computation...\n");
        // Compute the difference of the two values over their 4x32-bit chunks
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunks of the second input value
        NOT(inverse1, ciphertext9, bk, 32);
        NOT(inverse2, ciphertext10, bk, 32);
        NOT(inverse3, ciphertext11, bk, 32);
        NOT(inverse4, ciphertext12, bk, 32);
        // Initialize temp and the tempcarry arrays to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        zero(tempcarry3, bk, 32);
        zero(tempcarry4, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted low chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        // Propagate the carry through the remaining inverted chunks
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
        add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the first value to the negated second value, a + (-b)
        add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
        add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
        add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // result3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
        for (int i = 0; i < 32; i++)  // result4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        //clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, inverse3);
        delete_gate_bootstrapping_ciphertext_array(32, inverse4);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry3);
        delete_gate_bootstrapping_ciphertext_array(32, carry4);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
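    /*
     * The same structure generalizes to any width: each extra 32-bit chunk just
     * extends the ripple-carry chain. Hypothetical plaintext sketch over n limbs:
     *
     *   #include <cstdint>
     *   // r = a + b (little-endian limbs), returning the final carry-out.
     *   static uint32_t add_limbs(const uint32_t* a, const uint32_t* b,
     *                             uint32_t* r, int n) {
     *       uint32_t carry = 0;
     *       for (int k = 0; k < n; ++k) {
     *           uint64_t s = (uint64_t)a[k] + b[k] + carry;  // limb add with carry-in
     *           r[k]  = (uint32_t)s;
     *           carry = (uint32_t)(s >> 32);                 // carry-out to next limb
     *       }
     *       return carry;
     *   }
     */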
    //256 Bit Subtraction
    if (int_bit == 256) {
        printf("Doing the homomorphic computation...\n");
        // Compute the difference of the two values over their 8x32-bit chunks
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunks of the second input value
        NOT(inverse1, ciphertext9, bk, 32);
        NOT(inverse2, ciphertext10, bk, 32);
        NOT(inverse3, ciphertext11, bk, 32);
        NOT(inverse4, ciphertext12, bk, 32);
        NOT(inverse5, ciphertext13, bk, 32);
        NOT(inverse6, ciphertext14, bk, 32);
        NOT(inverse7, ciphertext15, bk, 32);
        NOT(inverse8, ciphertext16, bk, 32);
        // Initialize temp and the tempcarry arrays to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        zero(tempcarry3, bk, 32);
        zero(tempcarry4, bk, 32);
        zero(tempcarry5, bk, 32);
        zero(tempcarry6, bk, 32);
        zero(tempcarry7, bk, 32);
        zero(tempcarry8, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted low chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        // Propagate the carry through the remaining inverted chunks
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
        add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
        add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
        add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
        add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
        add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the first value to the negated second value, a + (-b)
        add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
        add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
        add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
        add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk);
        add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk);
        add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk);
        add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("Writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud): 8 result chunks + carry
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // result3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
        for (int i = 0; i < 32; i++)  // result4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
        for (int i = 0; i < 32; i++)  // result5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
        for (int i = 0; i < 32; i++)  // result6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
        for (int i = 0; i < 32; i++)  // result7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
        for (int i = 0; i < 32; i++)  // result8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        //clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, inverse3);
        delete_gate_bootstrapping_ciphertext_array(32, inverse4);
        delete_gate_bootstrapping_ciphertext_array(32, inverse5);
        delete_gate_bootstrapping_ciphertext_array(32, inverse6);
        delete_gate_bootstrapping_ciphertext_array(32, inverse7);
        delete_gate_bootstrapping_ciphertext_array(32, inverse8);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry3);
        delete_gate_bootstrapping_ciphertext_array(32, carry4);
        delete_gate_bootstrapping_ciphertext_array(32, carry5);
        delete_gate_bootstrapping_ciphertext_array(32, carry6);
        delete_gate_bootstrapping_ciphertext_array(32, carry7);
        delete_gate_bootstrapping_ciphertext_array(32, carry8);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, result5);
        delete_gate_bootstrapping_ciphertext_array(32, result6);
        delete_gate_bootstrapping_ciphertext_array(32, result7);
        delete_gate_bootstrapping_ciphertext_array(32, result8);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
}
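/*
 * Answer-file layout: every branch writes exactly 9 blocks of 32 ciphertexts
 * (result chunks first, then ciphertextcarry1 as padding), so the decrypting
 * side reads a fixed-size file regardless of int_bit. Reader-side sketch
 * (the FILE* name `answer` is hypothetical; the import call is the standard
 * TFHE one; a real reader would keep each block rather than overwrite it):
 *
 *   LweSample* block = new_gate_bootstrapping_ciphertext_array(32, params);
 *   FILE* answer = fopen("answer.data", "rb");
 *   for (int b = 0; b < 9; b++)
 *       for (int i = 0; i < 32; i++)
 *           import_gate_bootstrapping_ciphertext_fromFile(answer, &block[i], params);
 *   fclose(answer);
 *   delete_gate_bootstrapping_ciphertext_array(32, block);
 */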
//Addition (for subtraction) with the first value being a negative number: (-A) + B
else {
    if (int_op == 2) {
        std::cout << int_bit << " bit Subtraction computation" << "\n";
    } else {
        std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
    }
    if (int_bit == 32) {
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        printf("Doing the homomorphic computation...\n");
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunk of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        // Initialize temp and the tempcarry array to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the second value to the negated first value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // pad 2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
    else if (int_bit == 64) {
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        printf("Doing the homomorphic computation...\n");
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunks of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        NOT(inverse2, ciphertext2, bk, 32);
        // Initialize temp and the tempcarry arrays to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted low chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        // Propagate the carry through the remaining inverted chunk
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the second value to the negated first value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // pad 3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
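    /*
     * Note on operand order: these branches negate the FIRST operand, so the
     * circuit computes b + (~a + 1) = b - a. Tiny plaintext check (illustrative
     * only; the name is hypothetical):
     *
     *   #include <cstdint>
     *   static uint32_t sub_rev32(uint32_t a, uint32_t b) {
     *       return b + ~a + 1u;   // == b - a (mod 2^32)
     *   }
     */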
    else if (int_bit == 128) {
        printf("Doing the homomorphic computation...\n");
        // Compute the difference of the two values over their 4x32-bit chunks
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunks of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        NOT(inverse2, ciphertext2, bk, 32);
        NOT(inverse3, ciphertext3, bk, 32);
        NOT(inverse4, ciphertext4, bk, 32);
        // Initialize temp and the tempcarry arrays to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        zero(tempcarry3, bk, 32);
        zero(tempcarry4, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted low chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        // Propagate the carry through the remaining inverted chunks
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
        add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the second value to the negated first value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
        add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
        add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // result3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
        for (int i = 0; i < 32; i++)  // result4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        //clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, inverse3);
        delete_gate_bootstrapping_ciphertext_array(32, inverse4);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry3);
        delete_gate_bootstrapping_ciphertext_array(32, carry4);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
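    /*
     * Allocation hygiene: every new_gate_bootstrapping_ciphertext_array() must
     * be paired with delete_gate_bootstrapping_ciphertext_array(), which is
     * easy to get wrong across these long branches. A minimal RAII wrapper
     * (hypothetical, not part of TFHE) would pair them automatically:
     *
     *   struct CtArray {
     *       LweSample* p;
     *       int n;
     *       CtArray(int n_, const TFheGateBootstrappingParameterSet* prm)
     *           : p(new_gate_bootstrapping_ciphertext_array(n_, prm)), n(n_) {}
     *       ~CtArray() { delete_gate_bootstrapping_ciphertext_array(n, p); }
     *       CtArray(const CtArray&) = delete;
     *       CtArray& operator=(const CtArray&) = delete;
     *   };
     */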
    else if (int_bit == 256) {
        printf("Doing the homomorphic computation...\n");
        // Compute the difference of the two values over their 8x32-bit chunks
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Subtraction process
        // Step 1. Invert the 32-bit chunks of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        NOT(inverse2, ciphertext2, bk, 32);
        NOT(inverse3, ciphertext3, bk, 32);
        NOT(inverse4, ciphertext4, bk, 32);
        NOT(inverse5, ciphertext5, bk, 32);
        NOT(inverse6, ciphertext6, bk, 32);
        NOT(inverse7, ciphertext7, bk, 32);
        NOT(inverse8, ciphertext8, bk, 32);
        // Initialize temp and the tempcarry arrays to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        zero(tempcarry3, bk, 32);
        zero(tempcarry4, bk, 32);
        zero(tempcarry5, bk, 32);
        zero(tempcarry6, bk, 32);
        zero(tempcarry7, bk, 32);
        zero(tempcarry8, bk, 32);
        // Set temp to 1 to complete the two's complement
        bootsCONSTANT(temp, 1, bk);
        // Add 1 to the inverted low chunk
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        // Propagate the carry through the remaining inverted chunks
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
        add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
        add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
        add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
        add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
        add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);
        // Do the addition: add the second value to the negated first value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
        add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
        add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
        add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk);
        add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk);
        add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk);
        add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("Writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud): 8 result chunks + carry
        for (int i = 0; i < 32; i++)  // result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // result3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
        for (int i = 0; i < 32; i++)  // result4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
        for (int i = 0; i < 32; i++)  // result5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
        for (int i = 0; i < 32; i++)  // result6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
        for (int i = 0; i < 32; i++)  // result7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
        for (int i = 0; i < 32; i++)  // result8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        //clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, inverse3);
        delete_gate_bootstrapping_ciphertext_array(32, inverse4);
        delete_gate_bootstrapping_ciphertext_array(32, inverse5);
        delete_gate_bootstrapping_ciphertext_array(32, inverse6);
        delete_gate_bootstrapping_ciphertext_array(32, inverse7);
        delete_gate_bootstrapping_ciphertext_array(32, inverse8);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry3);
        delete_gate_bootstrapping_ciphertext_array(32, carry4);
        delete_gate_bootstrapping_ciphertext_array(32, carry5);
        delete_gate_bootstrapping_ciphertext_array(32, carry6);
        delete_gate_bootstrapping_ciphertext_array(32, carry7);
        delete_gate_bootstrapping_ciphertext_array(32, carry8);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, result5);
        delete_gate_bootstrapping_ciphertext_array(32, result6);
        delete_gate_bootstrapping_ciphertext_array(32, result7);
        delete_gate_bootstrapping_ciphertext_array(32, result8);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
}
}
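/*
 * The multiplication below is schoolbook long multiplication over 32-bit
 * limbs: the full first value is multiplied by each limb of the second value
 * (mul128/mul64/mul32 produce one shifted partial-product row each), and the
 * rows are accumulated with the ripple-carry adder. Plaintext analogue for
 * two limbs (illustrative only; names are hypothetical):
 *
 *   #include <cstdint>
 *   // 64x64 -> 128-bit product from 32-bit limbs (a1:a0) * (b1:b0).
 *   static void mul64_limbs(uint32_t a0, uint32_t a1, uint32_t b0, uint32_t b1,
 *                           uint64_t &lo, uint64_t &hi) {
 *       uint64_t p00 = (uint64_t)a0 * b0;
 *       uint64_t p01 = (uint64_t)a0 * b1;
 *       uint64_t p10 = (uint64_t)a1 * b0;
 *       uint64_t p11 = (uint64_t)a1 * b1;
 *       uint64_t mid = (p00 >> 32) + (uint32_t)p01 + (uint32_t)p10;  // middle column
 *       lo = (uint64_t)(uint32_t)p00 | (mid << 32);
 *       hi = p11 + (p01 >> 32) + (p10 >> 32) + (mid >> 32);
 *   }
 */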
// If Multiplication
else if (int_op == 4) {
    std::cout << int_bit << " bit Multiplication computation" << "\n";
    if (int_bit == 128) {
        printf("Doing the homomorphic computation...\n");
        // Compute the product of the two values
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Partial-product rows: multiply the first value (4 chunks) by each
        // 32-bit chunk of the second value
        mul128(result1, result2, result3, result4, result5,
               ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext9, ciphertextcarry1, 32, bk);
        mul128(result6, result7, result8, result9, result10,
               ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10, ciphertextcarry1, 32, bk);
        mul128(result11, result12, result13, result14, result15,
               ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext11, ciphertextcarry1, 32, bk);
        mul128(result16, result17, result18, result19, result20,
               ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext12, ciphertextcarry1, 32, bk);
        // Accumulate the shifted partial-product rows with ripple-carry additions
        add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk);
        add(sum2, carryover2, result9, result3, carryover1, 32, bk);
        add(sum3, carryover3, result8, result2, carryover2, 32, bk);
        add(sum4, carryover4, result7, result1, carryover3, 32, bk);
        add(sum5, carryover5, result6, ciphertextcarry1, carryover4, 32, bk);
        add(sum6, carryover6, sum2, result15, carryover5, 32, bk);
        add(sum7, carryover7, sum3, result14, carryover6, 32, bk);
        add(sum8, carryover8, sum4, result13, carryover7, 32, bk);
        add(sum9, carryover9, sum5, result12, carryover8, 32, bk);
        add(sum10, carryover10, result11, ciphertextcarry1, carryover9, 32, bk);
        add(sum11, carryover11, sum7, result20, carryover10, 32, bk);
        add(sum12, carryover12, sum8, result19, carryover11, 32, bk);
        add(sum13, carryover13, sum9, result18, carryover12, 32, bk);
        add(sum14, carryover14, sum10, result17, carryover13, 32, bk);
        add(sum15, carryover15, result16, ciphertextcarry1, carryover14, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        // write computation time to file
        FILE *t_file;
        t_file = fopen(T_FILE, "a");
        fprintf(t_file, "%lf\n", get_time);
        fclose(t_file);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud): 8 product chunks + carry
        for (int i = 0; i < 32; i++)  // answer block 1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
        for (int i = 0; i < 32; i++)  // answer block 2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params);
        for (int i = 0; i < 32; i++)  // answer block 3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params);
        for (int i = 0; i < 32; i++)  // answer block 4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params);
        for (int i = 0; i < 32; i++)  // answer block 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params);
        for (int i = 0; i < 32; i++)  // answer block 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params);
        for (int i = 0; i < 32; i++)  // answer block 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params);
        for (int i = 0; i < 32; i++)  // answer block 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        // clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, result5);
        delete_gate_bootstrapping_ciphertext_array(32, result6);
        delete_gate_bootstrapping_ciphertext_array(32, result7);
        delete_gate_bootstrapping_ciphertext_array(32, result8);
        delete_gate_bootstrapping_ciphertext_array(32, result9);
        delete_gate_bootstrapping_ciphertext_array(32, result10);
        delete_gate_bootstrapping_ciphertext_array(32, result11);
        delete_gate_bootstrapping_ciphertext_array(32, result12);
        delete_gate_bootstrapping_ciphertext_array(32, result13);
        delete_gate_bootstrapping_ciphertext_array(32, result14);
        delete_gate_bootstrapping_ciphertext_array(32, result15);
        delete_gate_bootstrapping_ciphertext_array(32, result16);
        delete_gate_bootstrapping_ciphertext_array(32, result17);
        delete_gate_bootstrapping_ciphertext_array(32, result18);
        delete_gate_bootstrapping_ciphertext_array(32, result19);
        delete_gate_bootstrapping_ciphertext_array(32, result20);
        delete_gate_bootstrapping_ciphertext_array(32, sum1);
        delete_gate_bootstrapping_ciphertext_array(32, sum2);
        delete_gate_bootstrapping_ciphertext_array(32, sum3);
        delete_gate_bootstrapping_ciphertext_array(32, sum4);
        delete_gate_bootstrapping_ciphertext_array(32, sum5);
        delete_gate_bootstrapping_ciphertext_array(32, sum6);
        delete_gate_bootstrapping_ciphertext_array(32, sum7);
        delete_gate_bootstrapping_ciphertext_array(32, sum8);
        delete_gate_bootstrapping_ciphertext_array(32, sum9);
        delete_gate_bootstrapping_ciphertext_array(32, sum10);
        delete_gate_bootstrapping_ciphertext_array(32, sum11);
        delete_gate_bootstrapping_ciphertext_array(32, sum12);
        delete_gate_bootstrapping_ciphertext_array(32, sum13);
        delete_gate_bootstrapping_ciphertext_array(32, sum14);
        delete_gate_bootstrapping_ciphertext_array(32, sum15);
        delete_gate_bootstrapping_ciphertext_array(32, carryover1);
        delete_gate_bootstrapping_ciphertext_array(32, carryover2);
        delete_gate_bootstrapping_ciphertext_array(32, carryover3);
        delete_gate_bootstrapping_ciphertext_array(32, carryover4);
        delete_gate_bootstrapping_ciphertext_array(32, carryover5);
        delete_gate_bootstrapping_ciphertext_array(32, carryover6);
        delete_gate_bootstrapping_ciphertext_array(32, carryover7);
        delete_gate_bootstrapping_ciphertext_array(32, carryover8);
        delete_gate_bootstrapping_ciphertext_array(32, carryover9);
        delete_gate_bootstrapping_ciphertext_array(32, carryover10);
        delete_gate_bootstrapping_ciphertext_array(32, carryover11);
        delete_gate_bootstrapping_ciphertext_array(32, carryover12);
        delete_gate_bootstrapping_ciphertext_array(32, carryover13);
        delete_gate_bootstrapping_ciphertext_array(32, carryover14);
        delete_gate_bootstrapping_ciphertext_array(32, carryover15);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
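    /*
     * In the 64-bit path below, each mul64() call produces the partial-product
     * rows for one 32-bit chunk of the second operand, and split() recombines
     * the overlapping rows with three chained additions. The net effect is
     * intended to match plain 64x64 -> 128-bit multiplication; an illustrative
     * plaintext equivalent (uses the GCC/Clang __int128 extension; the name is
     * hypothetical):
     *
     *   #include <cstdint>
     *   static void mul64_full(uint64_t a, uint64_t b, uint64_t &lo, uint64_t &hi) {
     *       unsigned __int128 p = (unsigned __int128)a * b;
     *       lo = (uint64_t)p;          // low 64 bits of the 128-bit product
     *       hi = (uint64_t)(p >> 64);  // high 64 bits
     *   }
     */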
    else if (int_bit == 64) {
        printf("Doing the homomorphic computation...\n");
        // Compute the product of the two values
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        // Partial-product rows for each 32-bit chunk of the second value
        mul64(result1, result2, result3, ciphertext1, ciphertext2, ciphertext9, ciphertextcarry1, 32, bk);
        mul64(result4, result5, result6, ciphertext1, ciphertext2, ciphertext10, ciphertextcarry1, 32, bk);
        // Recombine the overlapping rows into the final product chunks
        split(finalresult, finalresult2, finalresult3, result1, result2, result4, result5, result6, ciphertextcarry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        // write computation time to file
        FILE *t_file;
        t_file = fopen(T_FILE, "a");
        fprintf(t_file, "%lf\n", get_time);
        fclose(t_file);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // answer block 1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
        for (int i = 0; i < 32; i++)  // answer block 2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params);
        for (int i = 0; i < 32; i++)  // answer block 3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params);
        for (int i = 0; i < 32; i++)  // answer block 4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        // clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, result3);
        delete_gate_bootstrapping_ciphertext_array(32, result4);
        delete_gate_bootstrapping_ciphertext_array(32, result5);
        delete_gate_bootstrapping_ciphertext_array(32, result6);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, finalresult);
        delete_gate_bootstrapping_ciphertext_array(32, finalresult2);
        delete_gate_bootstrapping_ciphertext_array(32, finalresult3);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
    else if (int_bit == 32) {
        printf("Doing the homomorphic computation...\n");
        // Compute the product of the two values
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        gettimeofday(&start, NULL);
        mul32(result1, result2, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        // write computation time to file
        FILE *t_file;
        t_file = fopen(T_FILE, "a");
        fprintf(t_file, "%lf\n", get_time);
        fclose(t_file);
        printf("writing the answer to file...\n");
        // Export the ciphertexts to a file (for the cloud); pad unused slots with
        // ciphertextcarry1 so the answer file always holds 9 blocks
        for (int i = 0; i < 32; i++)  // answer block 1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++)  // answer block 2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++)  // pad 3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // pad 8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++)  // carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        // clean up all pointers
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
}
}
#include <string>
#include <iostream>
#include <algorithm>
#include <utility>
#include <tfhe/tfhe.h>
#include <tfhe/tfhe_io.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <cassert>
#include <sys/time.h>
#include <omp.h>
#include <fstream>

using namespace std;

ifstream read;

#define T_FILE "averagestandard.txt"

// Ripple-carry adder over encrypted bits: sum = x + y + c, one full adder
// per bit, with the final carry written to carryover.
void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y,
         const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    LweSample *carry = new_LweSample_array(1, in_out_params);
    LweSample *axc = new_LweSample_array(1, in_out_params);
    LweSample *bxc = new_LweSample_array(1, in_out_params);
    bootsCOPY(carry, c, keyset);
    for (int32_t i = 0; i < nb_bits; i++) {
        bootsXOR(axc, x + i, carry, keyset);
        bootsXOR(bxc, y + i, carry, keyset);
        bootsXOR(sum + i, x + i, bxc, keyset);   // sum_i = x_i ^ y_i ^ carry
        bootsAND(axc, axc, bxc, keyset);
        bootsXOR(carry, carry, axc, keyset);     // next carry
    }
    bootsCOPY(carryover, carry, keyset);
    delete_LweSample_array(1, carry);
    delete_LweSample_array(1, axc);
    delete_LweSample_array(1, bxc);
}
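/*
 * Minimal plaintext model of the full-adder recurrence in add() above
 * (an illustrative sketch, not part of the original program; the helper
 * name plain_add32 is hypothetical, and uint32_t/uint64_t are assumed to
 * be visible through the existing includes). Each boots* gate corresponds
 * to one boolean operation, so the homomorphic adder can be sanity-checked
 * against this clear-text version.
 */
static uint32_t plain_add32(uint32_t x, uint32_t y, int carry_in, int *carry_out) {
    uint32_t sum = 0;
    int carry = carry_in;
    for (int i = 0; i < 32; i++) {
        int xi = (x >> i) & 1;
        int yi = (y >> i) & 1;
        int axc = xi ^ carry;                 // mirrors bootsXOR(axc, x + i, carry)
        int bxc = yi ^ carry;                 // mirrors bootsXOR(bxc, y + i, carry)
        sum |= (uint32_t)(xi ^ bxc) << i;     // sum_i = x_i ^ y_i ^ carry
        carry = carry ^ (axc & bxc);          // mirrors bootsAND then bootsXOR
    }
    if (carry_out) *carry_out = carry;
    return sum;
}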
// Set all `size` bits of result to an encryption of 0.
void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) {
    for (int i = 0; i < size; i++) {
        bootsCONSTANT(result + i, 0, keyset);
    }
}

// Bitwise NOT of x into result.
void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) {
    for (int i = 0; i < size; i++) {
        bootsNOT(result + i, x + i, keyset);
    }
}

void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3,
           LweSample *a, LweSample *b, LweSample *c, LweSample *d, LweSample *e,
           const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    LweSample *sum = new_LweSample_array(32, in_out_params);
    LweSample *sum2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3 = new_LweSample_array(32, in_out_params);
    LweSample *carryover = new_LweSample_array(32, in_out_params);
    LweSample *carryover2 = new_LweSample_array(32, in_out_params);
    LweSample *carryover3 = new_LweSample_array(32, in_out_params);
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum + i, 0, keyset);
        bootsCONSTANT(sum2 + i, 0, keyset);
        bootsCONSTANT(sum3 + i, 0, keyset);
        bootsCONSTANT(carryover + i, 0, keyset);
        bootsCONSTANT(carryover2 + i, 0, keyset);
        bootsCONSTANT(carryover3 + i, 0, keyset);
    }
    // adding the 2nd result with the carry
    add(sum, carryover, e, b, carry, nb_bits, keyset);
    add(sum2, carryover2, d, a, carryover, nb_bits, keyset);
    add(sum3, carryover3, c, carryover2, carry, nb_bits, keyset);
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult + i, sum3 + i, keyset);
    }
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult2 + i, sum2 + i, keyset);
    }
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult3 + i, sum + i, keyset);
    }
    delete_LweSample_array(32, sum);
    delete_LweSample_array(32, sum2);
    delete_LweSample_array(32, sum3);
    delete_LweSample_array(32, carryover);
    delete_LweSample_array(32, carryover2);
    delete_LweSample_array(32, carryover3);
}

void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,
           const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);   // (zeroed below but otherwise unused here)
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
    }
    // multiply every bit of a by each bit of b
    int round = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // this is basically multiplying, as it is an AND gate;
            // a (ciphertext1) should be the least significant bits
            bootsAND(tmp + k, a + k, b + i, keyset);
        }
        if (round > 0) {
            for (int32_t i = 0; i < round; ++i) {
                // put `round` zeros in front
                bootsCONSTANT(tmp3c1 + i, 0, keyset);
            }
        }
        // copy all the bits that fit into an int32, after the zeros
        // (+round because the zeros are in front; tmp holds the least significant bits)
        for (int32_t i = 0; i < 32 - round; ++i) {
            bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
        }
        // the rest of the bits that couldn't fit
        for (int32_t i = 0; i < round; ++i) {
            bootsCOPY(tmp3c2 + i, tmp + i + 32 - round, keyset);
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c2 + i, keyset);
        bootsCOPY(result2 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
}
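/*
 * Plaintext sketch of the schoolbook shift-and-add scheme mul32() applies
 * homomorphically (illustrative only, not part of the original program;
 * the helper name plain_mul32 is hypothetical). Round i ANDs bit i of b
 * against all of a, shifts the partial product left by i, and accumulates
 * it into two 32-bit limbs (low = sum3c1, high = sum3c2 in mul32 above).
 */
static void plain_mul32(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi) {
    uint64_t acc = 0;
    for (int round = 0; round < 32; round++) {
        if ((b >> round) & 1)               // bootsAND(tmp + k, a + k, b + i)
            acc += (uint64_t)a << round;    // shift by `round`, then add()
    }
    *lo = (uint32_t)acc;                    // result2 (sum3c1) in mul32()
    *hi = (uint32_t)(acc >> 32);            // result  (sum3c2) in mul32()
}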
void mul64(LweSample *result, LweSample *result2, LweSample *result3, LweSample *a, LweSample *b,
           LweSample *c, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    LweSample *carry3 = new_LweSample_array(32, in_out_params);
    LweSample *carry4 = new_LweSample_array(32, in_out_params);
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(sum3c3 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(tmp3c3 + i, 0, keyset);
        bootsCONSTANT(tmp3c4 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
        bootsCONSTANT(carry3 + i, 0, keyset);
        bootsCONSTANT(carry4 + i, 0, keyset);
    }
    // multiply every bit of a and b by each bit of c
    int round = 0;
    int counter1 = 0;
    int counter2 = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // this is basically multiplying, as it is an AND gate;
            // a (ciphertext1) should be the least significant bits
            bootsAND(tmp + k, a + k, c + i, keyset);
            bootsAND(tmp2 + k, b + k, c + i, keyset);
        }
        counter1 = 32 - round;
        counter2 = 32 - counter1;
        if (round > 0) {
            for (int32_t i = 0; i < round; ++i) {
                // put `round` zeros in front
                bootsCONSTANT(tmp3c1 + i, 0, keyset);
            }
        }
        // copy all the bits that fit into an int32, after the zeros
        // tmp to tmp3c1 (+round because the zeros are in front; tmp holds the least significant bits)
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
        }
        // remaining bits of tmp to tmp3c2 (counter2 iterations)
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
        }
        // some of tmp2 into the remainder of tmp3c2 (counter1 iterations)
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
        }
        // the rest of tmp2 to tmp3c3 (counter2 iterations)
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c3 + i, keyset);
        bootsCOPY(result2 + i, sum3c2 + i, keyset);
        bootsCOPY(result3 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, sum3c3);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, tmp3c3);
    delete_LweSample_array(32, tmp3c4);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
    delete_LweSample_array(32, carry3);
    delete_LweSample_array(32, carry4);
}
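/*
 * Sketch of the limb bookkeeping in mul64()/mul128() (illustrative, not
 * part of the original program; plain_shift_split is a hypothetical name):
 * shifting a 32-bit partial product left by `round` bits splits it across
 * two adjacent 32-bit limbs. The low counter1 = 32 - round bits stay in
 * the current limb, after `round` leading zeros, and the top
 * counter2 = round bits spill into the next limb.
 */
static void plain_shift_split(uint32_t w, int round, uint32_t *low_limb, uint32_t *high_limb) {
    uint64_t shifted = (uint64_t)w << round;    // round in [0, 31]
    *low_limb = (uint32_t)shifted;              // bits copied into tmp3cN + round
    *high_limb = (uint32_t)(shifted >> 32);     // overflow copied into tmp3c(N+1)
}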
void mul128(LweSample *result, LweSample *result2, LweSample *result3, LweSample *result4, LweSample *result5,
            LweSample *a, LweSample *b, LweSample *c, LweSample *d, LweSample *e,
            const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c4 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c5 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp4 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c5 = new_LweSample_array(32, in_out_params);
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    LweSample *carry3 = new_LweSample_array(32, in_out_params);
    LweSample *carry4 = new_LweSample_array(32, in_out_params);
    LweSample *carry5 = new_LweSample_array(32, in_out_params);
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(sum3c3 + i, 0, keyset);
        bootsCONSTANT(sum3c4 + i, 0, keyset);
        bootsCONSTANT(sum3c5 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3 + i, 0, keyset);
        bootsCONSTANT(tmp4 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(tmp3c3 + i, 0, keyset);
        bootsCONSTANT(tmp3c4 + i, 0, keyset);
        bootsCONSTANT(tmp3c5 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
        bootsCONSTANT(carry3 + i, 0, keyset);
        bootsCONSTANT(carry4 + i, 0, keyset);
        bootsCONSTANT(carry5 + i, 0, keyset);
    }
    // multiply every bit of a..d by each bit of e
    int round = 0;
    int counter1 = 0;
    int counter2 = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // this is basically multiplying, as it is an AND gate;
            // a (ciphertext1) should be the least significant bits
            bootsAND(tmp + k, a + k, e + i, keyset);
            bootsAND(tmp2 + k, b + k, e + i, keyset);
            bootsAND(tmp3 + k, c + k, e + i, keyset);
            bootsAND(tmp4 + k, d + k, e + i, keyset);
        }
        counter1 = 32 - round;
        counter2 = 32 - counter1;
        if (round > 0) {
            for (int32_t i = 0; i < round; ++i) {
                // put `round` zeros in front
                bootsCONSTANT(tmp3c1 + i, 0, keyset);
            }
        }
        // copy all the bits that fit into an int32, after the zeros
        // tmp to tmp3c1 (+round because the zeros are in front; tmp holds the least significant bits)
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c1 + i + round, tmp + i, keyset);
        }
        // remaining bits of tmp to tmp3c2
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c2 + i, tmp + i + counter1, keyset);
        }
        // some of tmp2 into the remainder of tmp3c2
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c2 + i + counter2, tmp2 + i, keyset);
        }
        // remaining tmp2 to tmp3c3
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c3 + i, tmp2 + i + counter1, keyset);
        }
        // some of tmp3 into the remainder of tmp3c3
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c3 + i + counter2, tmp3 + i, keyset);
        }
        // rest of tmp3 to tmp3c4
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c4 + i, tmp3 + i + counter1, keyset);
        }
        // some of tmp4 into the remainder of tmp3c4
        for (int32_t i = 0; i < counter1; ++i) {
            bootsCOPY(tmp3c4 + i + counter2, tmp4 + i, keyset);
        }
        // rest of tmp4 to tmp3c5
        for (int32_t i = 0; i < counter2; ++i) {
            bootsCOPY(tmp3c5 + i, tmp4 + i + counter1, keyset);
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
        add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset);
        add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c5 + i, keyset);
        bootsCOPY(result2 + i, sum3c4 + i, keyset);
        bootsCOPY(result3 + i, sum3c3 + i, keyset);
        bootsCOPY(result4 + i, sum3c2 + i, keyset);
        bootsCOPY(result5 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, sum3c3);
    delete_LweSample_array(32, sum3c4);
    delete_LweSample_array(32, sum3c5);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3);
    delete_LweSample_array(32, tmp4);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, tmp3c3);
    delete_LweSample_array(32, tmp3c4);
    delete_LweSample_array(32, tmp3c5);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
    delete_LweSample_array(32, carry3);
    delete_LweSample_array(32, carry4);
    delete_LweSample_array(32, carry5);
}

int main() {
    // sidh_cipher_cloud should have already appended 2 cipherstreams into cloud.data
    printf("Reading the key...\n");

    // reads the cloud key from file
    FILE* cloud_key = fopen("cloud.key", "rb");
    TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key);
    fclose(cloud_key);

    // reads the nbit key from file
    FILE* nbit_key = fopen("nbit.key", "rb");
    TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key);
    fclose(nbit_key);

    // if necessary, the params are inside the keys
    const TFheGateBootstrappingParameterSet* params = bk->params;
    const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params;

    // Create ciphertext blocks for negative1, bit1, negative2, bit2 and the values
    LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

    printf("Reading input 1...\n");
    // reads ciphertexts from cloud.data
    FILE* cloud_data = fopen("cloud.data", "rb");
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams);  // line0
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams);       // line1

    // Decrypts bit size 1
    int32_t int_bit1 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextbit1[i], nbitkey) > 0;
        int_bit1 |= (ai << i);
    }

    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params);         // line10

    printf("Reading input 2...\n");
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams);  // line11
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams);       // line12

    // Decrypts bit size 2
    int32_t int_bit2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextbit2[i], nbitkey) > 0;
        int_bit2 |= (ai << i);
    }

    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params);
    for (int i = 0; i < 32; i++) import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params);         // line21

    printf("Reading operation code...\n");
    // Get Operation Code from File
    int32_t int_op;
    read.open("operator.txt");
    read >> int_op;

    // Homomorphic encryption to add negative1 and negative2 ciphertexts
    LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
    // add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE

    // Decrypts Negative1
    int32_t int_negative1 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative1[i], nbitkey) > 0;
        int_negative1 |= (ai << i);
    }
    std::cout << int_negative1 << " => negative1" << "\n";
    // convert first value negativity code from 2 to 1
    if (int_negative1 == 2) { int_negative1 = 1; }
    // Decrypts Negative2
    int32_t int_negative2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative2[i], nbitkey) > 0;
        int_negative2 |= (ai << i);
    }
    std::cout << int_negative2 << " => negative2" << "\n";

    // Add Negatives.
    // If both v1 & v2 are positive, int_negative = 0
    // If only v1 is negative, int_negative = 1
    // If only v2 is negative, int_negative = 2
    // If both v1 & v2 are negative, int_negative = 3
    int32_t int_negative;
    int_negative = (int_negative1 + int_negative2);
    // std::cout << int_negative << " -> negatives" << "\n";

    // export the negative and bit data for the verification
    FILE* answer_data = fopen("answer.data", "wb");

    // Write negative to answer.data
    int32_t ciphernegative = 0;
    if (int_negative == 1) { ciphernegative = 1; }
    if (int_negative == 2) { ciphernegative = 2; }
    if (int_negative == 3) { ciphernegative = 4; }
    for (int i = 0; i < 32; i++) {
        bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative >> i) & 1, nbitkey);
    }
    for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams);
    std::cout << ciphernegative << " => total negatives" << "\n";
    delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative);

    // Compare bit sizes
    int32_t int_bit = 0;
    if (int_op == 4) {
        if (int_bit1 >= int_bit2) { int_bit = (int_bit1 * 2); }
        else { int_bit = (int_bit2 * 2); }
        for (int i = 0; i < 32; i++) {
            bootsSymEncrypt(&ciphertextbit[i], (int_bit >> i) & 1, nbitkey);
        }
        for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
        if (int_bit1 >= int_bit2) { int_bit = int_bit1; }
        else { int_bit = int_bit2; }
    }
    else if (int_bit1 >= int_bit2) {
        int_bit = int_bit1;
        for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    else {
        int_bit = int_bit2;
        for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    fclose(cloud_data);
    // If trying to multiply a 256 bit number
    if ((int_op == 4) && (int_bit >= 256)) {
        std::cout << "Cannot multiply 256 bit number!" << "\n";
        fclose(answer_data);
        return 126;
    }

    // Addition
    // if (the operation is add AND (both numbers are positive OR both numbers are negative))
    // OR (the operation is subtract AND either number is negative)
    // A+B, [(-A)+(-B)], A-(-B), (-A)-(B)
    if ((int_op == 1 && (int_negative != 1 && int_negative != 2)) || (int_op == 2 && (int_negative == 1 || int_negative == 2))) {
        if (int_op == 1) {
            std::cout << int_bit << " bit Addition computation" << "\n";
        } else {
            std::cout << int_bit << " bit Subtraction computation" << "\n";
        }

        // 32 Bit Addition
        if (int_bit == 32) {
            // Ciphertext to hold the result and carry
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);

            // Timings
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);            // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
            fclose(answer_data);
            printf("writing the answer to file...\n");

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }
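        /*
         * Plaintext model of the limb chaining the 64/128/256-bit branches
         * below perform with add() (an illustrative sketch, not part of the
         * original program; plain_addN is a hypothetical name): each 32-bit
         * limb addition consumes the carry produced by the previous limb.
         */
        auto plain_addN = [](const uint32_t *a, const uint32_t *b, uint32_t *r, int nlimbs) {
            int carry = 0;
            for (int k = 0; k < nlimbs; k++) {
                uint64_t t = (uint64_t)a[k] + (uint64_t)b[k] + (uint64_t)carry;
                r[k] = (uint32_t)t;        // result limb k
                carry = (int)(t >> 32);    // carry into limb k + 1
            }
        };
        (void)plain_addN;  // reference sketch only; never invoked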
        // 64 Bit Addition
        if (int_bit == 64) {
            // Ciphertext to hold the result and carry
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);

            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);            // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);           // result2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
            fclose(answer_data);

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }

        // 128 Bit Addition
        if (int_bit == 128) {
            // Ciphertext to hold the result and carry
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");

            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
            add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);

            // Timing
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);            // result1
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);           // result2
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);           // result3
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);           // result4
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
            fclose(answer_data);

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, result3);
            delete_gate_bootstrapping_ciphertext_array(32, result4);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, carry3);
            delete_gate_bootstrapping_ciphertext_array(32, carry4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }

        // 256 Bit Addition
        if (int_bit == 256) {
            // do some operations on the ciphertexts: here, we will compute
            // the addition of the two
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);

            // Timing
            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);

            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
            add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
            add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk);
            add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk);
            add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk);
            add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk);

            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the 64 ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
            for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
            fclose(answer_data);

            // clean up all pointers
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, result3);
            delete_gate_bootstrapping_ciphertext_array(32, result4);
            delete_gate_bootstrapping_ciphertext_array(32, result5);
            delete_gate_bootstrapping_ciphertext_array(32, result6);
            delete_gate_bootstrapping_ciphertext_array(32, result7);
            delete_gate_bootstrapping_ciphertext_array(32, result8);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, carry3);
            delete_gate_bootstrapping_ciphertext_array(32, carry4);
            delete_gate_bootstrapping_ciphertext_array(32, carry5);
            delete_gate_bootstrapping_ciphertext_array(32, carry6);
            delete_gate_bootstrapping_ciphertext_array(32, carry7);
            delete_gate_bootstrapping_ciphertext_array(32, carry8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }
    }
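    /*
     * Dispatch note (illustrative, not part of the original program): with
     * op 1 = add, op 2 = subtract, and int_negative encoding the signs
     * (0 = neither negative, 1 = first only, 2 = second only, 3 = both),
     * the branch above handles the cases that add magnitudes,
     * (op==1 && neg not in {1,2}) || (op==2 && neg in {1,2}), i.e. A+B,
     * (-A)+(-B), A-(-B), (-A)-B, while the else-if below handles the cases
     * that subtract magnitudes, op==2 || (op==1 && neg in {1,2}).
     */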
    // Subtraction
    // If the operation is subtract OR (the operation is addition AND either value is negative): A-B, A+(-B), (-A)+B
    else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))) {
        // Normal subtraction with no negative numbers, A-B, OR addition with the 2nd number negative, A+(-B)
        if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)) {
            if (int_op == 2) {
                std::cout << int_bit << " bit Subtraction computation" << "\n";
            } else {
                std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n";
            }

            // 32 Bit Subtraction
            if (int_bit == 32) {
                printf("Doing the homomorphic computation...\n");
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);

                // Subtraction process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                // initialize temp and tempcarry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                // Assign temp a value of 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the inverted second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the 32 ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);           // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }
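            /*
             * Plaintext model of the two's-complement subtraction above
             * (an illustrative sketch, not part of the original program;
             * plain_sub32 is a hypothetical name): a - b is computed as
             * a + (~b + 1), exactly the NOT / add-1 / add sequence applied
             * to the encrypted limbs.
             */
            auto plain_sub32 = [](uint32_t a, uint32_t b) -> uint32_t {
                uint32_t twos = ~b + 1u;   // NOT(...) then add(temp = 1)
                return a + twos;           // add(result1, carry1, a, twosresult1, ...)
            };
            (void)plain_sub32;  // reference sketch only; never invoked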
            // 64 Bit Subtraction
            if (int_bit == 64) {
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                printf("Doing the homomorphic computation...\n");
                gettimeofday(&start, NULL);

                // Subtraction process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                // initialize temp and tempcarry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                // Assign temp a value of 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted chunks
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the inverted second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the 32 ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);           // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);           // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }
            // 128 Bit Subtraction
            if (int_bit == 128) {
                // reads the 2x32 ciphertexts from the cloud file
                printf("Doing the homomorphic computation...\n");
                // do some operations on the ciphertexts: here, we will compute
                // the difference of the two
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);
                // Subtraction process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                NOT(inverse3, ciphertext11, bk, 32);
                NOT(inverse4, ciphertext12, bk, 32);
                // initialize temp and tempcarry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                zero(tempcarry3, bk, 32);
                zero(tempcarry4, bk, 32);
                // Assign temp a value of 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // Add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // Add the rest of the inverted chunks
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
                add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
                add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: add the first value to the inverted second value, a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
                add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
                add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the 32 ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);           // result1
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);           // result2
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);           // result3
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);           // result4
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 5
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 6
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 7
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // 8
                for (int i = 0; i < 32; i++) export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);  // carry
                fclose(answer_data);

                // clean up all pointers
                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, inverse3);
                delete_gate_bootstrapping_ciphertext_array(32, inverse4);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry3);
                delete_gate_bootstrapping_ciphertext_array(32, carry4);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, result3);
                delete_gate_bootstrapping_ciphertext_array(32, result4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }

            // 256 Bit Subtraction
            if (int_bit == 256) {
                // reads the 2x32 ciphertexts from the cloud file
                printf("Doing the homomorphic computation...\n");
                // do some operations on the ciphertexts: here, we will compute
                // the difference of the two
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);

// Subtraction process
// Step 1: invert the 32-bit chunks of the second input value
NOT(inverse1, ciphertext9, bk, 32);
NOT(inverse2, ciphertext10, bk, 32);
NOT(inverse3, ciphertext11, bk, 32);
NOT(inverse4, ciphertext12, bk, 32);
NOT(inverse5, ciphertext13, bk, 32);
NOT(inverse6, ciphertext14, bk, 32);
NOT(inverse7, ciphertext15, bk, 32);
NOT(inverse8, ciphertext16, bk, 32);
// initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
// assign temp the value 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
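// The +1 of the two's complement only enters the lowest chunk; each chunk's
// overflow word (twoscarryN) is then fed in as the input carry of the next
// chunk's add(), so the negation ripples through all eight 32-bit words.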
// add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
// propagate through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);

LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);

// do the addition; this is effectively adding the first value to the
// negated second value, i.e. a + (-b)
add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

printf("Writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i = 0; i < 32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i = 0; i < 32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i = 0; i < 32; i++) // result5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i = 0; i < 32; i++) // result6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i = 0; i < 32; i++) // result7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i = 0; i < 32; i++) // result8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);
// clean up all pointers (carry3..carry8 added; the original freed only
// carry1 and carry2 and leaked the rest)
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
// Addition (for subtraction) with value 1 being a negative number: (-A) + B
else {
if (int_op == 2) {
    std::cout << int_bit << " bit Subtraction computation" << "\n";
} else {
    std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
}
if (int_bit == 32) {
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
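// This branch negates the *first* operand with the same two's-complement
// identity, -a = NOT(a) + 1, and then computes b + (-a).
// A plaintext sketch of the steps below (illustration only, not part of
// the homomorphic circuit):
//   uint32_t inv  = ~a;        // NOT(inverse1, ciphertext1, bk, 32)
//   uint32_t twos = inv + 1u;  // add(twosresult1, ..., inverse1, temp, ...)
//   uint32_t out  = b + twos;  // add(result1, ..., ciphertext9, twosresult1, ...)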
// Subtraction process
// Step 1: invert the 32-bit chunk of the first input value
NOT(inverse1, ciphertext1, bk, 32);
// initialize temp and the temp carry to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
// assign temp the value 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
// add 1 to the inverted chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);

LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);

// do the addition; this is effectively adding the negated first value to
// the second value, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // block 2 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 3 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 4 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 5 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 6 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 7 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 8 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64) {
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
printf("Doing the homomorphic computation...\n");
gettimeofday(&start, NULL);
// Subtraction process
// Step 1: invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
// initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
// assign temp the value 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
// add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
// propagate through the remaining inverted chunk
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);

LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);

// do the addition; this is effectively adding the negated first value to
// the second value, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i = 0; i < 32; i++) // block 3 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 4 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 5 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 6 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 7 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 8 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 128) {
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
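// Note: the chunk-level add() calls below are inherently sequential, since
// each one consumes the previous chunk's carry word; the only gate-level
// parallelism is the OpenMP sections inside add() itself.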
// Subtraction process
// Step 1: invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
// initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
// assign temp the value 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
// add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
// propagate through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);

LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);

// do the addition; this is effectively adding the negated first value to
// the second value, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i = 0; i < 32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i = 0; i < 32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i = 0; i < 32; i++) // block 5 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 6 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 7 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 8 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 256) {
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// difference of the two
LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);

// Subtraction process
// Step 1: invert the 32-bit chunks of the first input value
NOT(inverse1, ciphertext1, bk, 32);
NOT(inverse2, ciphertext2, bk, 32);
NOT(inverse3, ciphertext3, bk, 32);
NOT(inverse4, ciphertext4, bk, 32);
NOT(inverse5, ciphertext5, bk, 32);
NOT(inverse6, ciphertext6, bk, 32);
NOT(inverse7, ciphertext7, bk, 32);
NOT(inverse8, ciphertext8, bk, 32);
// initialize temp and the temp carries to 0
zero(temp, bk, 32);
zero(tempcarry1, bk, 32);
zero(tempcarry2, bk, 32);
zero(tempcarry3, bk, 32);
zero(tempcarry4, bk, 32);
zero(tempcarry5, bk, 32);
zero(tempcarry6, bk, 32);
zero(tempcarry7, bk, 32);
zero(tempcarry8, bk, 32);
// assign temp the value 1 for the two's complement
bootsCONSTANT(temp, 1, bk);
// add 1 to the inverted low chunk
add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
// propagate through the remaining inverted chunks
add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);
add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk);
add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk);
add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk);
add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk);

LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);

// do the addition; this is effectively adding the negated first value to
// the second value, i.e. (-a) + b
add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk);
add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk);
add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk);
add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk);
add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk);
add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

printf("Writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // result1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // result2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i = 0; i < 32; i++) // result3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i = 0; i < 32; i++) // result4
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
for (int i = 0; i < 32; i++) // result5
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i = 0; i < 32; i++) // result6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
for (int i = 0; i < 32; i++) // result7
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
for (int i = 0; i < 32; i++) // result8
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, temp);
delete_gate_bootstrapping_ciphertext_array(32, inverse1);
delete_gate_bootstrapping_ciphertext_array(32, inverse2);
delete_gate_bootstrapping_ciphertext_array(32, inverse3);
delete_gate_bootstrapping_ciphertext_array(32, inverse4);
delete_gate_bootstrapping_ciphertext_array(32, inverse5);
delete_gate_bootstrapping_ciphertext_array(32, inverse6);
delete_gate_bootstrapping_ciphertext_array(32, inverse7);
delete_gate_bootstrapping_ciphertext_array(32, inverse8);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry5);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry6);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry7);
delete_gate_bootstrapping_ciphertext_array(32, tempcarry8);
delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
delete_gate_bootstrapping_ciphertext_array(32, twosresult5);
delete_gate_bootstrapping_ciphertext_array(32, twosresult6);
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
// carry3..carry8 added; the original freed only carry1 and carry2
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
// If multiplication
else if (int_op == 4) {
std::cout << int_bit << " bit Multiplication computation" << "\n";
if (int_bit == 128) {
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
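// 128x128-bit schoolbook plan: each mul128() call below multiplies the
// full four-word first operand by one 32-bit word of the second operand
// and returns five partial-product words; the add() chains that follow
// combine the four partial products at the appropriate one-word offsets,
// yielding the 256-bit product across eight words.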
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);

// partial product with word 1 of the second operand (ciphertext9)
mul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext9, ciphertextcarry1, 32, bk);
// partial product with word 2 (ciphertext10)
mul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10, ciphertextcarry1, 32, bk);
// partial product with word 3 (ciphertext11)
mul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext11, ciphertextcarry1, 32, bk);
// partial product with word 4 (ciphertext12)
mul128(result16, result17, result18, result19, result20, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext12, ciphertextcarry1, 32, bk);
add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk);
add(sum2, carryover2, result9, result3, carryover1, 32, bk);
add(sum3, carryover3, result8, result2, carryover2, 32, bk);
add(sum4, carryover4, result7, result1, carryover3, 32, bk);
add(sum5, carryover5, result6, ciphertextcarry1, carryover4, 32, bk);
add(sum6, carryover6, sum2, result15, carryover5, 32, bk);
add(sum7, carryover7, sum3, result14, carryover6, 32, bk);
add(sum8, carryover8, sum4, result13, carryover7, 32, bk);
add(sum9, carryover9, sum5, result12, carryover8, 32, bk);
add(sum10, carryover10, result11, ciphertextcarry1, carryover9, 32, bk);
add(sum11, carryover11, sum7, result20, carryover10, 32, bk);
add(sum12, carryover12, sum8, result19, carryover11, 32, bk);
add(sum13, carryover13, sum9, result18, carryover12, 32, bk);
add(sum14, carryover14, sum10, result17, carryover13, 32, bk);
add(sum15, carryover15, result16, ciphertextcarry1, carryover14, 32, bk);
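// As wired above, the eight words of the 256-bit product end up in
// result5, sum1, sum6, sum11, sum12, sum13, sum14 and sum15 (least
// significant first); these are exactly the blocks exported below.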
gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // block 1: result5 (low word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
for (int i = 0; i < 32; i++) // block 2: sum1
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params);
for (int i = 0; i < 32; i++) // block 3: sum6
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params);
for (int i = 0; i < 32; i++) // block 4: sum11
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params);
for (int i = 0; i < 32; i++) // block 5: sum12
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params);
for (int i = 0; i < 32; i++) // block 6: sum13
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params);
for (int i = 0; i < 32; i++) // block 7: sum14
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params);
for (int i = 0; i < 32; i++) // block 8: sum15 (high word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, result9);
delete_gate_bootstrapping_ciphertext_array(32, result10);
delete_gate_bootstrapping_ciphertext_array(32, result11);
delete_gate_bootstrapping_ciphertext_array(32, result12);
delete_gate_bootstrapping_ciphertext_array(32, result13);
delete_gate_bootstrapping_ciphertext_array(32, result14);
delete_gate_bootstrapping_ciphertext_array(32, result15);
delete_gate_bootstrapping_ciphertext_array(32, result16);
delete_gate_bootstrapping_ciphertext_array(32, result17);
delete_gate_bootstrapping_ciphertext_array(32, result18);
delete_gate_bootstrapping_ciphertext_array(32, result19);
delete_gate_bootstrapping_ciphertext_array(32, result20);
delete_gate_bootstrapping_ciphertext_array(32, sum1);
delete_gate_bootstrapping_ciphertext_array(32, sum2);
delete_gate_bootstrapping_ciphertext_array(32, sum3);
delete_gate_bootstrapping_ciphertext_array(32, sum4);
delete_gate_bootstrapping_ciphertext_array(32, sum5);
delete_gate_bootstrapping_ciphertext_array(32, sum6);
delete_gate_bootstrapping_ciphertext_array(32, sum7);
delete_gate_bootstrapping_ciphertext_array(32, sum8);
delete_gate_bootstrapping_ciphertext_array(32, sum9);
delete_gate_bootstrapping_ciphertext_array(32, sum10);
delete_gate_bootstrapping_ciphertext_array(32, sum11);
delete_gate_bootstrapping_ciphertext_array(32, sum12);
delete_gate_bootstrapping_ciphertext_array(32, sum13);
delete_gate_bootstrapping_ciphertext_array(32, sum14);
delete_gate_bootstrapping_ciphertext_array(32, sum15);
delete_gate_bootstrapping_ciphertext_array(32, carryover1);
delete_gate_bootstrapping_ciphertext_array(32, carryover2);
delete_gate_bootstrapping_ciphertext_array(32, carryover3);
delete_gate_bootstrapping_ciphertext_array(32, carryover4);
delete_gate_bootstrapping_ciphertext_array(32, carryover5);
delete_gate_bootstrapping_ciphertext_array(32, carryover6);
delete_gate_bootstrapping_ciphertext_array(32, carryover7);
delete_gate_bootstrapping_ciphertext_array(32, carryover8);
delete_gate_bootstrapping_ciphertext_array(32, carryover9);
delete_gate_bootstrapping_ciphertext_array(32, carryover10);
delete_gate_bootstrapping_ciphertext_array(32, carryover11);
delete_gate_bootstrapping_ciphertext_array(32, carryover12);
delete_gate_bootstrapping_ciphertext_array(32, carryover13);
delete_gate_bootstrapping_ciphertext_array(32, carryover14);
delete_gate_bootstrapping_ciphertext_array(32, carryover15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 64) {
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
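// 64x64-bit plan: mul64() multiplies the two-word first operand by one
// 32-bit word of the second operand, returning three partial-product
// words; split() then recombines the two partial products with a one-word
// offset, giving the 128-bit product across four words.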
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);

// partial product with word 1 of the second operand (ciphertext9)
mul64(result1, result2, result3, ciphertext1, ciphertext2, ciphertext9, ciphertextcarry1, 32, bk);
// partial product with word 2 (ciphertext10)
mul64(result4, result5, result6, ciphertext1, ciphertext2, ciphertext10, ciphertextcarry1, 32, bk);
split(finalresult, finalresult2, finalresult3, result1, result2, result4, result5, result6, ciphertextcarry1, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // block 1: result3 (low word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
for (int i = 0; i < 32; i++) // block 2: finalresult3
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params);
for (int i = 0; i < 32; i++) // block 3: finalresult2
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params);
for (int i = 0; i < 32; i++) // block 4: finalresult (high word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params);
for (int i = 0; i < 32; i++) // block 5 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 6 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 7 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 8 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_ciphertext_array(32, finalresult);
delete_gate_bootstrapping_ciphertext_array(32, finalresult2);
delete_gate_bootstrapping_ciphertext_array(32, finalresult3);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
else if (int_bit == 32) {
printf("Doing the homomorphic computation...\n");
// do some operations on the ciphertexts: here, we will compute the
// product of the two
LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
struct timeval start, end;
double get_time;
gettimeofday(&start, NULL);
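// 32x32-bit case: one mul32() call yields the 64-bit product as a high
// word (result1) and a low word (result2); the low word is written to the
// first answer block, matching the low-word-first order of the wider cases.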
mul32(result1, result2, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);

gettimeofday(&end, NULL);
get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
printf("Computation Time: %lf[sec]\n", get_time);

// write computation time to file
FILE *t_file;
t_file = fopen(T_FILE, "a");
fprintf(t_file, "%lf\n", get_time);
fclose(t_file);

printf("writing the answer to file...\n");
// export the 32-bit ciphertext blocks to a file (for the cloud)
for (int i = 0; i < 32; i++) // block 1: result2 (low word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
for (int i = 0; i < 32; i++) // block 2: result1 (high word)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
for (int i = 0; i < 32; i++) // block 3 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 4 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 5 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 6 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 7 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // block 8 (padding)
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
for (int i = 0; i < 32; i++) // carry
    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
fclose(answer_data);

// clean up all pointers
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
#include <string>
#include <iostream>
#include <algorithm>
#include <utility>
#include <tfhe/tfhe.h>
#include <tfhe/tfhe_io.h>
#include <stdio.h>
#include <time.h>
#include <vector>
#include <cassert>
#include <sys/time.h>
#include <omp.h>
#include <fstream>

using namespace std;

ifstream read;

#define T_FILE "averagestandard.txt"

void add(LweSample *sum, LweSample *carryover, const LweSample *x, const LweSample *y, const LweSample *c, const int32_t nb_bits, const TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    LweSample *carry = new_LweSample_array(1, in_out_params);
    LweSample *axc = new_LweSample_array(1, in_out_params);
    LweSample *bxc = new_LweSample_array(1, in_out_params);
    bootsCOPY(carry, c, keyset);
    for (int32_t i = 0; i < nb_bits; i++) {
#pragma omp parallel sections num_threads(2)
        {
#pragma omp section
            bootsXOR(axc, x + i, carry, keyset);
#pragma omp section
            bootsXOR(bxc, y + i, carry, keyset);
        }
#pragma omp parallel sections num_threads(2)
        {
#pragma omp section
            bootsXOR(sum + i, x + i, bxc, keyset);
#pragma omp section
            bootsAND(axc, axc, bxc, keyset);
        }
        bootsXOR(carry, carry, axc, keyset);
    }
    bootsCOPY(carryover, carry, keyset);
    delete_LweSample_array(1, carry);
    delete_LweSample_array(1, axc);
    delete_LweSample_array(1, bxc);
}

void zero(LweSample* result, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) {
    for (size_t i = 0; i < size; i++) {
        bootsCONSTANT(result + i, 0, keyset);
    }
}

void NOT(LweSample* result, const LweSample* x, const TFheGateBootstrappingCloudKeySet* keyset, const size_t size) {
    for (size_t i = 0; i < size; i++) {
        bootsNOT(result + i, x + i, keyset);
    }
}

void split(LweSample *finalresult, LweSample *finalresult2, LweSample *finalresult3, LweSample *a, LweSample *b, LweSample *c, LweSample *d, LweSample *e, const LweSample *carry, const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset) {
    const LweParams *in_out_params = keyset->params->in_out_params;
    LweSample *sum = new_LweSample_array(32, in_out_params);
    LweSample *sum2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3 = new_LweSample_array(32, in_out_params);
    LweSample *carryover = new_LweSample_array(32, in_out_params);
    LweSample *carryover2 = new_LweSample_array(32, in_out_params);
    LweSample *carryover3 = new_LweSample_array(32, in_out_params);
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum + i, 0, keyset);
        bootsCONSTANT(sum2 + i, 0, keyset);
        bootsCONSTANT(sum3 + i, 0, keyset);
        bootsCONSTANT(carryover + i, 0, keyset);
        bootsCONSTANT(carryover2 + i, 0, keyset);
        bootsCONSTANT(carryover3 + i, 0, keyset);
    }
    // adding the 2nd result with the carry
    add(sum, carryover, e, b, carry, nb_bits, keyset);
    add(sum2, carryover2, d, a, carryover, nb_bits, keyset);
    add(sum3, carryover3, c, carryover2, carry, nb_bits, keyset);
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult + i, sum3 + i, keyset);
    }
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult2 + i, sum2 + i, keyset);
    }
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCOPY(finalresult3 + i, sum + i, keyset);
    }
    delete_LweSample_array(32, sum);
    delete_LweSample_array(32, sum2);
    delete_LweSample_array(32, sum3);
    delete_LweSample_array(32, carryover);
    delete_LweSample_array(32, carryover2);
    delete_LweSample_array(32, carryover3);
}
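// mul32 below is a schoolbook shift-and-add multiplier. A plaintext sketch
// of the same algorithm (illustration only, not part of the circuit):
//   uint64_t acc = 0;
//   for (int i = 0; i < 32; ++i)
//       if ((b >> i) & 1) acc += (uint64_t)a << i;
// The homomorphic version forms each partial product with per-bit AND
// gates, realises the shift by copying bits into the low/high accumulator
// words at a growing "round" offset, and accumulates with the
// carry-propagating add() above.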
// Schoolbook multiplication of two 32-bit encrypted values:
// a * b -> 64 bits, returned as result (high limb) and result2 (low limb).
// a (ciphertext1) should hold the least significant operand.
void mul32(LweSample *result, LweSample *result2, LweSample *a, LweSample *b,
           const LweSample *carry, const int32_t nb_bits,
           TFheGateBootstrappingCloudKeySet *keyset)
{
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);   // unused; kept for symmetry with mul64/mul128
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
    }
    // one round per multiplier bit: AND row, shift, accumulate
    int round = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // the AND gate is the actual bit multiplication
            // (a single section, so this region effectively runs serially)
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsAND(tmp + k, a + k, b + i, keyset);
            }
        }
        if (round > 0) {
            for (int32_t j = 0; j < round; ++j) {
                // place the leading zeros of the shifted partial product
                bootsCONSTANT(tmp3c1 + j, 0, keyset);
            }
        }
        // copy the bits that still fit into the low limb, above the zeros;
        // tmp holds the least significant bits
        for (int32_t j = 0; j < 32 - round; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c1 + j + round, tmp + j, keyset);
            }
        }
        // the bits that no longer fit spill into the high limb
        for (int32_t j = 0; j < round; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c2 + j, tmp + j + 32 - round, keyset);
            }
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c2 + i, keyset);
        bootsCOPY(result2 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
}
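
#include <cstdint>

// A plaintext model of the limb bookkeeping in mul32 above (hypothetical
// helper, not called anywhere): round r ANDs every bit of a with bit r of b,
// splits the shifted partial product across a (low, high) limb pair and
// accumulates it with two chained adds, mirroring what tmp3c1/tmp3c2 and
// sum3c1/sum3c2 do on ciphertexts.
static void mul32_plain_model(uint32_t a, uint32_t b, uint32_t *lo, uint32_t *hi)
{
    uint32_t sum_lo = 0, sum_hi = 0;
    for (int round = 0; round < 32; round++) {
        uint32_t pp = ((b >> round) & 1) ? a : 0;              // the bootsAND row
        uint32_t part_lo = pp << round;                        // tmp3c1: bits that fit
        uint32_t part_hi = round ? (pp >> (32 - round)) : 0;   // tmp3c2: spilled bits
        uint64_t t = (uint64_t)sum_lo + part_lo;               // add(sum3c1, ...)
        sum_lo = (uint32_t)t;
        sum_hi += part_hi + (uint32_t)(t >> 32);               // add(sum3c2, ..., carry1)
    }
    *lo = sum_lo;   // corresponds to result2 (least significant limb)
    *hi = sum_hi;   // corresponds to result (most significant limb)
}
// e.g. mul32_plain_model(0xFFFFFFFFu, 0xFFFFFFFFu, &lo, &hi) yields lo = 1 and
// hi = 0xFFFFFFFE, i.e. (2^32 - 1)^2.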
// Schoolbook multiplication of a 64-bit value (limbs a = low, b = high) by a
// 32-bit value c: (b:a) * c -> 96 bits, returned as result (most significant
// limb), result2, and result3 (least significant limb).
void mul64(LweSample *result, LweSample *result2, LweSample *result3,
           LweSample *a, LweSample *b, LweSample *c, const LweSample *carry,
           const int32_t nb_bits, TFheGateBootstrappingCloudKeySet *keyset)
{
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);   // unused; kept as allocated
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    LweSample *carry3 = new_LweSample_array(32, in_out_params);
    LweSample *carry4 = new_LweSample_array(32, in_out_params);   // unused; kept as allocated
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(sum3c3 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(tmp3c3 + i, 0, keyset);
        bootsCONSTANT(tmp3c4 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
        bootsCONSTANT(carry3 + i, 0, keyset);
        bootsCONSTANT(carry4 + i, 0, keyset);
    }
    // one round per multiplier bit
    int round = 0;
    int counter1 = 0;
    int counter2 = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // the AND gates are the bit multiplications;
            // a should be the least significant limb
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsAND(tmp + k, a + k, c + i, keyset);
                #pragma omp section
                bootsAND(tmp2 + k, b + k, c + i, keyset);
            }
        }
        counter1 = 32 - round;    // bits of each source limb that stay in place
        counter2 = 32 - counter1; // bits that spill into the next limb up
        if (round > 0) {
            for (int32_t j = 0; j < round; ++j) {
                // leading zeros of the shifted partial product
                bootsCONSTANT(tmp3c1 + j, 0, keyset);
            }
        }
        // tmp into tmp3c1, above the zeros
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c1 + j + round, tmp + j, keyset);
            }
        }
        // remainder of tmp into the bottom of tmp3c2 (counter2 iterations)
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c2 + j, tmp + j + counter1, keyset);
            }
        }
        // low bits of tmp2 into the rest of tmp3c2 (counter1 iterations)
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c2 + j + counter2, tmp2 + j, keyset);
            }
        }
        // remainder of tmp2 into tmp3c3 (counter2 iterations)
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c3 + j, tmp2 + j + counter1, keyset);
            }
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c3 + i, keyset);
        bootsCOPY(result2 + i, sum3c2 + i, keyset);
        bootsCOPY(result3 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, sum3c3);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, tmp3c3);
    delete_LweSample_array(32, tmp3c4);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
    delete_LweSample_array(32, carry3);
    delete_LweSample_array(32, carry4);
}
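
// How a two-limb partial product shifted by `round` spreads over three 32-bit
// limbs, as a plaintext model of the counter1/counter2 copies above
// (hypothetical helper, not called anywhere):
static void shift64_into_limbs(uint32_t p_lo, uint32_t p_hi, int round,
                               uint32_t out[3])
{
    int counter1 = 32 - round;   // bits of each source limb that stay put
    int counter2 = round;        // bits that spill into the next limb up
    out[0] = p_lo << round;                           // tmp3c1
    out[1] = (counter2 ? (p_lo >> counter1) : 0)      // bottom of tmp3c2
           | (p_hi << round);                         // rest of tmp3c2
    out[2] = counter2 ? (p_hi >> counter1) : 0;       // tmp3c3
}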
// Schoolbook multiplication of a 128-bit value (limbs a = low ... d = high)
// by a 32-bit value e: 160 bits of output, returned as result (most
// significant limb) down to result5 (least significant limb).
void mul128(LweSample *result, LweSample *result2, LweSample *result3,
            LweSample *result4, LweSample *result5, LweSample *a,
            LweSample *b, LweSample *c, LweSample *d, LweSample *e,
            const LweSample *carry, const int32_t nb_bits,
            TFheGateBootstrappingCloudKeySet *keyset)
{
    const LweParams *in_out_params = keyset->params->in_out_params;
    // sums of the output
    LweSample *sum3c1 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c2 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c3 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c4 = new_LweSample_array(32, in_out_params);
    LweSample *sum3c5 = new_LweSample_array(32, in_out_params);
    LweSample *tmp = new_LweSample_array(32, in_out_params);
    LweSample *tmp2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp4 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c1 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c2 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c3 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c4 = new_LweSample_array(32, in_out_params);
    LweSample *tmp3c5 = new_LweSample_array(32, in_out_params);
    LweSample *carry1 = new_LweSample_array(32, in_out_params);
    LweSample *carry2 = new_LweSample_array(32, in_out_params);
    LweSample *carry3 = new_LweSample_array(32, in_out_params);
    LweSample *carry4 = new_LweSample_array(32, in_out_params);
    LweSample *carry5 = new_LweSample_array(32, in_out_params);
    // set all of these to 0
    for (int32_t i = 0; i < nb_bits; ++i) {
        bootsCONSTANT(sum3c1 + i, 0, keyset);
        bootsCONSTANT(sum3c2 + i, 0, keyset);
        bootsCONSTANT(sum3c3 + i, 0, keyset);
        bootsCONSTANT(sum3c4 + i, 0, keyset);
        bootsCONSTANT(sum3c5 + i, 0, keyset);
        bootsCONSTANT(tmp + i, 0, keyset);
        bootsCONSTANT(tmp2 + i, 0, keyset);
        bootsCONSTANT(tmp3 + i, 0, keyset);
        bootsCONSTANT(tmp4 + i, 0, keyset);
        bootsCONSTANT(tmp3c1 + i, 0, keyset);
        bootsCONSTANT(tmp3c2 + i, 0, keyset);
        bootsCONSTANT(tmp3c3 + i, 0, keyset);
        bootsCONSTANT(tmp3c4 + i, 0, keyset);
        bootsCONSTANT(tmp3c5 + i, 0, keyset);
        bootsCONSTANT(carry1 + i, 0, keyset);
        bootsCONSTANT(carry2 + i, 0, keyset);
        bootsCONSTANT(carry3 + i, 0, keyset);
        bootsCONSTANT(carry4 + i, 0, keyset);
        bootsCONSTANT(carry5 + i, 0, keyset);
    }
    // one round per multiplier bit
    int round = 0;
    int counter1 = 0;
    int counter2 = 0;
    for (int32_t i = 0; i < nb_bits; ++i) {
        for (int32_t k = 0; k < nb_bits; ++k) {
            // the AND gates are the bit multiplications;
            // a should be the least significant limb
            #pragma omp parallel sections num_threads(4)
            {
                #pragma omp section
                bootsAND(tmp + k, a + k, e + i, keyset);
                #pragma omp section
                bootsAND(tmp2 + k, b + k, e + i, keyset);
                #pragma omp section
                bootsAND(tmp3 + k, c + k, e + i, keyset);
                #pragma omp section
                bootsAND(tmp4 + k, d + k, e + i, keyset);
            }
        }
        counter1 = 32 - round;    // bits of each source limb that stay in place
        counter2 = 32 - counter1; // bits that spill into the next limb up
        if (round > 0) {
            for (int32_t j = 0; j < round; ++j) {
                // leading zeros of the shifted partial product
                bootsCONSTANT(tmp3c1 + j, 0, keyset);
            }
        }
        // tmp into tmp3c1, above the zeros
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c1 + j + round, tmp + j, keyset);
            }
        }
        // remainder of tmp into tmp3c2
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c2 + j, tmp + j + counter1, keyset);
            }
        }
        // low bits of tmp2 into the rest of tmp3c2
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c2 + j + counter2, tmp2 + j, keyset);
            }
        }
        // remainder of tmp2 into tmp3c3
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c3 + j, tmp2 + j + counter1, keyset);
            }
        }
        // low bits of tmp3 into the rest of tmp3c3
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c3 + j + counter2, tmp3 + j, keyset);
            }
        }
        // remainder of tmp3 into tmp3c4
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c4 + j, tmp3 + j + counter1, keyset);
            }
        }
        // low bits of tmp4 into the rest of tmp3c4
        for (int32_t j = 0; j < counter1; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c4 + j + counter2, tmp4 + j, keyset);
            }
        }
        // remainder of tmp4 into tmp3c5
        for (int32_t j = 0; j < counter2; ++j) {
            #pragma omp parallel sections num_threads(2)
            {
                #pragma omp section
                bootsCOPY(tmp3c5 + j, tmp4 + j + counter1, keyset);
            }
        }
        add(sum3c1, carry1, sum3c1, tmp3c1, carry, 32, keyset);
        add(sum3c2, carry2, sum3c2, tmp3c2, carry1, 32, keyset);
        add(sum3c3, carry3, sum3c3, tmp3c3, carry2, 32, keyset);
        add(sum3c4, carry4, sum3c4, tmp3c4, carry3, 32, keyset);
        add(sum3c5, carry5, sum3c5, tmp3c5, carry4, 32, keyset);
        round++;
    }
    for (int32_t i = 0; i < 32; ++i) {
        bootsCOPY(result + i, sum3c5 + i, keyset);
        bootsCOPY(result2 + i, sum3c4 + i, keyset);
        bootsCOPY(result3 + i, sum3c3 + i, keyset);
        bootsCOPY(result4 + i, sum3c2 + i, keyset);
        bootsCOPY(result5 + i, sum3c1 + i, keyset);
    }
    delete_LweSample_array(32, sum3c1);
    delete_LweSample_array(32, sum3c2);
    delete_LweSample_array(32, sum3c3);
    delete_LweSample_array(32, sum3c4);
    delete_LweSample_array(32, sum3c5);
    delete_LweSample_array(32, tmp);
    delete_LweSample_array(32, tmp2);
    delete_LweSample_array(32, tmp3);
    delete_LweSample_array(32, tmp4);
    delete_LweSample_array(32, tmp3c1);
    delete_LweSample_array(32, tmp3c2);
    delete_LweSample_array(32, tmp3c3);
    delete_LweSample_array(32, tmp3c4);
    delete_LweSample_array(32, tmp3c5);
    delete_LweSample_array(32, carry1);
    delete_LweSample_array(32, carry2);
    delete_LweSample_array(32, carry3);
    delete_LweSample_array(32, carry4);
    delete_LweSample_array(32, carry5);
}
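
// mul32, mul64 and mul128 are the same shift-and-add algorithm at widths of
// 1, 2 and 4 limbs: (N limbs) x (32-bit multiplier) -> N + 1 limbs. A generic
// plaintext model of that pipeline (hypothetical helper, not called anywhere):
static void mulN_plain_model(const uint32_t *a, int n_limbs, uint32_t m,
                             uint32_t *out /* n_limbs + 1 entries */)
{
    for (int i = 0; i <= n_limbs; i++) out[i] = 0;
    for (int round = 0; round < 32; round++) {
        if (!((m >> round) & 1)) continue;   // this AND row would be all zeros
        uint64_t carry = 0;
        for (int limb = 0; limb < n_limbs; limb++) {
            // partial-product limb shifted left by `round`, split across limbs
            uint64_t shifted = (uint64_t)a[limb] << round;
            uint64_t t = (uint64_t)out[limb] + (uint32_t)shifted + carry;
            out[limb] = (uint32_t)t;
            carry = (t >> 32) + (shifted >> 32);
        }
        out[n_limbs] += (uint32_t)carry;
    }
}
// On ciphertexts the AND rows cannot be skipped (the multiplier bit is
// encrypted), which is why the functions above always run all 32 rounds.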
int main()
{
    // sidh_cipher_cloud should have already appended 2 cipherstreams into cloud.data
    printf("Reading the key...\n");

    // reads the cloud key from file
    FILE* cloud_key = fopen("cloud.key", "rb");
    TFheGateBootstrappingCloudKeySet* bk = new_tfheGateBootstrappingCloudKeySet_fromFile(cloud_key);
    fclose(cloud_key);

    // reads the nbit key from file
    FILE* nbit_key = fopen("nbit.key", "rb");
    TFheGateBootstrappingSecretKeySet* nbitkey = new_tfheGateBootstrappingSecretKeySet_fromFile(nbit_key);
    fclose(nbit_key);

    // if necessary, the params are inside the keys
    const TFheGateBootstrappingParameterSet* params = bk->params;
    const TFheGateBootstrappingParameterSet* nbitparams = nbitkey->params;

    // Create ciphertext blocks for negative1, bit1, negative2, bit2 and the values
    LweSample* ciphertextbit = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextnegative1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextbit1 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextnegative2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertextbit2 = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    LweSample* ciphertext1 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext2 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext3 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext4 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext5 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext6 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext7 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext8 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext9 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext10 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext11 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext12 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext13 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext14 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext15 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertext16 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertextcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* ciphertextcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

    printf("Reading input 1...\n");
    // reads ciphertexts from cloud.data
    FILE* cloud_data = fopen("cloud.data", "rb");
    for (int i = 0; i < 32; i++)   // line0
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative1[i], nbitparams);
    for (int i = 0; i < 32; i++)   // line1
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit1[i], nbitparams);

    // Decrypts bit size 1 (32 plaintext bits, least significant first)
    int32_t int_bit1 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextbit1[i], nbitkey) > 0;
        int_bit1 |= (ai << i);
    }
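
    // A quick plaintext round-trip of the bit packing used for the metadata
    // here (bit sizes and negativity flags travel as 32 encrypted bits,
    // least significant first); this check is ours, not part of the protocol:
    {
        int32_t value = 128, decoded = 0;
        for (int i = 0; i < 32; i++) {
            int bit = (value >> i) & 1;   // what bootsSymEncrypt stores per bit
            decoded |= bit << i;          // what the decrypt loops rebuild
        }
        assert(decoded == value);
    }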
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext1[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext2[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext3[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext4[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext5[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext6[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext7[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext8[i], params);
    for (int i = 0; i < 32; i++)   // line10
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry1[i], params);

    printf("Reading input 2...\n");
    for (int i = 0; i < 32; i++)   // line11
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextnegative2[i], nbitparams);
    for (int i = 0; i < 32; i++)   // line12
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextbit2[i], nbitparams);

    // Decrypts bit size 2
    int32_t int_bit2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextbit2[i], nbitkey) > 0;
        int_bit2 |= (ai << i);
    }

    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext9[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext10[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext11[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext12[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext13[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext14[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext15[i], params);
    for (int i = 0; i < 32; i++)
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertext16[i], params);
    for (int i = 0; i < 32; i++)   // line21
        import_gate_bootstrapping_ciphertext_fromFile(cloud_data, &ciphertextcarry2[i], params);

    printf("Reading operation code...\n");
    // Get the operation code from file
    int32_t int_op;
    read.open("operator.txt");
    read >> int_op;

    // Homomorphic addition of the negative1 and negative2 ciphertexts
    LweSample* ciphertextnegative = new_gate_bootstrapping_ciphertext_array(32, nbitparams);
    // only needed by the commented-out add below; the branches further down
    // declare their own carry1
    LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
    // add(ciphertextnegative, carry1, ciphertextnegative1, ciphertextnegative2, ciphertextcarry1, 32, nbitcloudkey); // NOTE

    // Decrypts Negative1
    int32_t int_negative1 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative1[i], nbitkey) > 0;
        int_negative1 |= (ai << i);
    }
    std::cout << int_negative1 << " => negative1" << "\n";
    // convert the first value's negativity code from 2 to 1
    if (int_negative1 == 2) { int_negative1 = 1; }

    // Decrypts Negative2
    int32_t int_negative2 = 0;
    for (int i = 0; i < 32; i++) {
        int ai = bootsSymDecrypt(&ciphertextnegative2[i], nbitkey) > 0;
        int_negative2 |= (ai << i);
    }
    std::cout << int_negative2 << " => negative2" << "\n";
    // Add the negativity codes:
    // If both v1 & v2 are positive, int_negative = 0
    // If only v1 is negative, int_negative = 1
    // If only v2 is negative, int_negative = 2
    // If both v1 & v2 are negative, int_negative = 3
    int32_t int_negative;
    int_negative = (int_negative1 + int_negative2);
    // std::cout << int_negative << " -> negatives" << "\n";

    // export the negative and bit data for the verification step
    FILE* answer_data = fopen("answer.data", "wb");

    // Write the negativity code to answer.data (both-negative, 3, is
    // re-encoded as 4 before encryption)
    int32_t ciphernegative = 0;
    if (int_negative == 1) { ciphernegative = 1; }
    if (int_negative == 2) { ciphernegative = 2; }
    if (int_negative == 3) { ciphernegative = 4; }
    for (int i = 0; i < 32; i++) {
        bootsSymEncrypt(&ciphertextnegative[i], (ciphernegative >> i) & 1, nbitkey);
    }
    for (int i = 0; i < 32; i++)
        export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextnegative[i], nbitparams);
    std::cout << ciphernegative << " => total negatives" << "\n";
    delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative);

    // Compare the bit sizes; a multiplication (int_op == 4) doubles the
    // exported result size
    int32_t int_bit = 0;
    if (int_op == 4) {
        if (int_bit1 >= int_bit2) { int_bit = (int_bit1 * 2); }
        else { int_bit = (int_bit2 * 2); }
        for (int i = 0; i < 32; i++) {
            bootsSymEncrypt(&ciphertextbit[i], (int_bit >> i) & 1, nbitkey);
        }
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
        if (int_bit1 >= int_bit2) { int_bit = int_bit1; }
        else { int_bit = int_bit2; }
    }
    else if (int_bit1 >= int_bit2) {
        int_bit = int_bit1;
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit1[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    else {
        int_bit = int_bit2;
        for (int i = 0; i < 32; i++)
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextbit2[i], nbitparams);
        std::cout << int_bit << " written to answer.data" << "\n";
    }
    fclose(cloud_data);
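
    // The exported negativity code is not the plain sum: both-negative (3) is
    // written as 4. A plaintext restatement of the mapping above (this check
    // is ours, not part of the protocol):
    {
        auto export_code = [](int n1, int n2) {
            int n = n1 + n2;        // 0 none, 1 first only, 2 second only, 3 both
            return n == 3 ? 4 : n;  // both-negative is re-encoded as 4
        };
        assert(export_code(0, 0) == 0);
        assert(export_code(1, 0) == 1);
        assert(export_code(0, 2) == 2);
        assert(export_code(1, 2) == 4);
    }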
    // If trying to multiply a 256 bit number
    if ((int_op == 4) && (int_bit >= 256)) {
        std::cout << "Cannot multiply 256 bit number!" << "\n";
        fclose(answer_data);
        return 126;
    }

    // Addition
    // if (the operation is add AND (both numbers are positive OR both numbers
    // are negative)) OR (the operation is subtract AND either number is
    // negative), i.e. A+B, [(-A)+(-B)], A-(-B), (-A)-(B)
    if ((int_op == 1 && (int_negative != 1 && int_negative != 2)) ||
        (int_op == 2 && (int_negative == 1 || int_negative == 2))) {
        if (int_op == 1) {
            std::cout << int_bit << " bit Addition computation" << "\n";
        } else {
            std::cout << int_bit << " bit Subtraction computation" << "\n";
        }

        // 32 Bit Addition
        if (int_bit == 32) {
            // Ciphertexts to hold the result and carry
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");
            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            // Timings
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);

            // export the result ciphertexts to a file (for the cloud);
            // unused result slots are padded with ciphertextcarry1
            for (int i = 0; i < 32; i++)   // result1
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
            for (int i = 0; i < 32; i++)   // 2
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 3
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 4
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 5
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 6
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 7
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 8
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // carry
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            fclose(answer_data);
            printf("writing the answer to file...\n");

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }
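
        // Each width below repeats the same pattern: add() limb by limb, the
        // low limb taking ciphertextcarry1 and every later limb taking the
        // previous limb's carry. A hypothetical loop form over limb arrays
        // (the names n_limbs/lhs/rhs/results/carries are ours; kept out of
        // the build):
#if 0
        const LweSample *carry_in = ciphertextcarry1;
        for (int limb = 0; limb < n_limbs; limb++) {
            add(results[limb], carries[limb], lhs[limb], rhs[limb], carry_in, 32, bk);
            carry_in = carries[limb];   // ripple into the next 32-bit limb
        }
#endif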
        // 64 Bit Addition
        if (int_bit == 64) {
            // Ciphertexts to hold the results and carries
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);

            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            printf("Doing the homomorphic computation...\n");
            // Adding component
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++)   // result1
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
            for (int i = 0; i < 32; i++)   // result2
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
            for (int i = 0; i < 32; i++)   // 3
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 4
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 5
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 6
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 7
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // 8
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            for (int i = 0; i < 32; i++)   // carry
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            fclose(answer_data);

            // Clean up
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }
printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); // export the result ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //Clean up delete_gate_bootstrapping_ciphertext_array(32, result); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } //256 Bit Addition if (int_bit == 256) { // do some operations on the ciphertexts: here, we will compute the // addition of the two LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); 
        // 256 Bit Addition
        if (int_bit == 256) {
            // compute the addition of the two 256-bit values
            LweSample* result = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params);
            LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params);

            // Timing
            struct timeval start, end;
            double get_time;
            gettimeofday(&start, NULL);
            add(result, carry1, ciphertext1, ciphertext9, ciphertextcarry1, 32, bk);
            add(result2, carry2, ciphertext2, ciphertext10, carry1, 32, bk);
            add(result3, carry3, ciphertext3, ciphertext11, carry2, 32, bk);
            add(result4, carry4, ciphertext4, ciphertext12, carry3, 32, bk);
            add(result5, carry5, ciphertext5, ciphertext13, carry4, 32, bk);
            add(result6, carry6, ciphertext6, ciphertext14, carry5, 32, bk);
            add(result7, carry7, ciphertext7, ciphertext15, carry6, 32, bk);
            add(result8, carry8, ciphertext8, ciphertext16, carry7, 32, bk);
            gettimeofday(&end, NULL);
            get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
            printf("Computation Time: %lf[sec]\n", get_time);
            printf("writing the answer to file...\n");

            // export the result ciphertexts to a file (for the cloud)
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params);
            for (int i = 0; i < 32; i++)
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params);
            for (int i = 0; i < 32; i++)   // carry
                export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
            fclose(answer_data);

            // clean up all pointers
            delete_gate_bootstrapping_ciphertext_array(32, result);
            delete_gate_bootstrapping_ciphertext_array(32, result2);
            delete_gate_bootstrapping_ciphertext_array(32, result3);
            delete_gate_bootstrapping_ciphertext_array(32, result4);
            delete_gate_bootstrapping_ciphertext_array(32, result5);
            delete_gate_bootstrapping_ciphertext_array(32, result6);
            delete_gate_bootstrapping_ciphertext_array(32, result7);
            delete_gate_bootstrapping_ciphertext_array(32, result8);
            delete_gate_bootstrapping_ciphertext_array(32, carry1);
            delete_gate_bootstrapping_ciphertext_array(32, carry2);
            delete_gate_bootstrapping_ciphertext_array(32, carry3);
            delete_gate_bootstrapping_ciphertext_array(32, carry4);
            delete_gate_bootstrapping_ciphertext_array(32, carry5);
            delete_gate_bootstrapping_ciphertext_array(32, carry6);
            delete_gate_bootstrapping_ciphertext_array(32, carry7);
            delete_gate_bootstrapping_ciphertext_array(32, carry8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
            delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
            delete_gate_bootstrapping_cloud_keyset(bk);
            delete_gate_bootstrapping_secret_keyset(nbitkey);
        }
    }
    // Subtraction
    // If the operation is subtract OR (the operation is addition AND either
    // one of the values is negative): A-B, A+(-B), (-A)+B
    else if (int_op == 2 || (int_op == 1 && (int_negative == 1 || int_negative == 2))) {
        // Normal subtraction with no negative numbers, A-B,
        // OR addition with the 2nd number negative, A+(-B)
        if ((int_op == 2 && int_negative == 0) || (int_op == 1 && int_negative == 2)) {
            if (int_op == 2) {
                std::cout << int_bit << " bit Subtraction computation" << "\n";
            } else {
                std::cout << int_bit << " bit Addition computation with 2nd value negative" << "\n";
            }
            // 32 Bit Subtraction
            if (int_bit == 32) {
                printf("Doing the homomorphic computation...\n");
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunk of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                // initialize temp and tempcarry to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // add 1 to the inverted value
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: this adds the first value to the negated
                // second value, i.e. a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++)   // result1
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
                for (int i = 0; i < 32; i++)   // 2
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 3
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 4
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 5
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 6
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 7
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 8
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // carry
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }
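
            // The subtraction path computes a + (~b + 1): bitwise NOT of b via
            // NOT(), then add() with a constant 1, i.e. two's-complement
            // negation. A plaintext check of that identity (this check is
            // ours, not part of the protocol):
            {
                uint32_t v = 123456789u;
                uint32_t neg = ~v + 1u;              // NOT(...) then add 1
                assert((uint32_t)(v + neg) == 0u);   // v + (-v) wraps to zero
            }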
            // 64 Bit Subtraction
            if (int_bit == 64) {
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                printf("Doing the homomorphic computation...\n");
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                // initialize temp and the tempcarries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // add 1 to the inverted low limb
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // propagate that carry through the remaining inverted limb
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: this adds the first value to the negated
                // second value, i.e. a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++)   // result1
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
                for (int i = 0; i < 32; i++)   // result2
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
                for (int i = 0; i < 32; i++)   // 3
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 4
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 5
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 6
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 7
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 8
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // carry
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                fclose(answer_data);

                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }
            // 128 Bit Subtraction
            if (int_bit == 128) {
                printf("Doing the homomorphic computation...\n");
                // compute the difference of the two 128-bit values
                LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params);

                struct timeval start, end;
                double get_time;
                gettimeofday(&start, NULL);

                // Subtraction Process
                // Step 1: invert the 32-bit chunks of the second input value
                NOT(inverse1, ciphertext9, bk, 32);
                NOT(inverse2, ciphertext10, bk, 32);
                NOT(inverse3, ciphertext11, bk, 32);
                NOT(inverse4, ciphertext12, bk, 32);
                // initialize temp and the tempcarries to 0
                zero(temp, bk, 32);
                zero(tempcarry1, bk, 32);
                zero(tempcarry2, bk, 32);
                zero(tempcarry3, bk, 32);
                zero(tempcarry4, bk, 32);
                // set temp to 1 for the two's complement
                bootsCONSTANT(temp, 1, bk);
                // add 1 to the inverted low limb
                add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
                // propagate that carry through the remaining inverted limbs
                add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
                add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk);
                add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk);

                LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params);
                LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params);
                // Do the addition: this adds the first value to the negated
                // second value, i.e. a + (-b)
                add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk);
                add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk);
                add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk);
                add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk);

                gettimeofday(&end, NULL);
                get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
                printf("Computation Time: %lf[sec]\n", get_time);
                printf("writing the answer to file...\n");

                // export the result ciphertexts to a file (for the cloud)
                for (int i = 0; i < 32; i++)   // result1
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
                for (int i = 0; i < 32; i++)   // result2
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
                for (int i = 0; i < 32; i++)   // result3
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params);
                for (int i = 0; i < 32; i++)   // result4
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params);
                for (int i = 0; i < 32; i++)   // 5
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 6
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 7
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // 8
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                for (int i = 0; i < 32; i++)   // carry
                    export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
                fclose(answer_data);

                // clean up all pointers
                delete_gate_bootstrapping_ciphertext_array(32, temp);
                delete_gate_bootstrapping_ciphertext_array(32, inverse1);
                delete_gate_bootstrapping_ciphertext_array(32, inverse2);
                delete_gate_bootstrapping_ciphertext_array(32, inverse3);
                delete_gate_bootstrapping_ciphertext_array(32, inverse4);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry3);
                delete_gate_bootstrapping_ciphertext_array(32, tempcarry4);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult3);
                delete_gate_bootstrapping_ciphertext_array(32, twosresult4);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
                delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
                delete_gate_bootstrapping_ciphertext_array(32, carry1);
                delete_gate_bootstrapping_ciphertext_array(32, carry2);
                delete_gate_bootstrapping_ciphertext_array(32, carry3);
                delete_gate_bootstrapping_ciphertext_array(32, carry4);
                delete_gate_bootstrapping_ciphertext_array(32, result1);
                delete_gate_bootstrapping_ciphertext_array(32, result2);
                delete_gate_bootstrapping_ciphertext_array(32, result3);
                delete_gate_bootstrapping_ciphertext_array(32, result4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
                delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
                delete_gate_bootstrapping_cloud_keyset(bk);
                delete_gate_bootstrapping_secret_keyset(nbitkey);
            }
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. Inverse the 32 bit chunks in the second input value NOT(inverse1, ciphertext9, bk, 32); NOT(inverse2, ciphertext10, bk, 32); NOT(inverse3, ciphertext11, bk, 32); NOT(inverse4, ciphertext12, bk, 32); NOT(inverse5, ciphertext13, bk, 32); NOT(inverse6, ciphertext14, bk, 32); NOT(inverse7, ciphertext15, bk, 32); NOT(inverse8, ciphertext16, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); zero(tempcarry5, bk, 32); zero(tempcarry6, bk, 32); zero(tempcarry7, bk, 32); zero(tempcarry8, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk); add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk); add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk); add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first value to the inversed value of the second value, a + (-b) add(result1, carry1, ciphertext1, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext2, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext3, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext4, twosresult4, carry3, 32, bk); add(result5, carry5, ciphertext5, twosresult5, carry4, 32, bk); add(result6, carry6, ciphertext6, twosresult6, carry5, 32, bk); add(result7, carry7, ciphertext7, twosresult7, carry6, 32, bk); add(result8, carry8, ciphertext8, twosresult8, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("Writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) //result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) //result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) //result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) //result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) //result5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) //result6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) //result7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) //result8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, inverse5); delete_gate_bootstrapping_ciphertext_array(32, inverse6); delete_gate_bootstrapping_ciphertext_array(32, inverse7); delete_gate_bootstrapping_ciphertext_array(32, inverse8); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry5); delete_gate_bootstrapping_ciphertext_array(32, tempcarry6); delete_gate_bootstrapping_ciphertext_array(32, tempcarry7); delete_gate_bootstrapping_ciphertext_array(32, tempcarry8); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twosresult5); delete_gate_bootstrapping_ciphertext_array(32, twosresult6); 
delete_gate_bootstrapping_ciphertext_array(32, twosresult7);
delete_gate_bootstrapping_ciphertext_array(32, twosresult8);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
//Addition (used for subtraction) where the first value is a negative number: (-A)+B
else {
    if (int_op == 2) {
        std::cout << int_bit << " bit Subtraction computation" << "\n";
    } else {
        std::cout << int_bit << " bit Addition computation with 1st value negative" << "\n";
    }
    if (int_bit == 32) {
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        printf("Doing the homomorphic computation...\n");
        gettimeofday(&start, NULL);
        //Subtraction Process
        //Step 1. Invert the 32-bit chunk of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        //initialize temp and the temporary carry to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        //Assign temp a value of 1 for the two's complement
        bootsCONSTANT(temp, 1, bk);
        //Add 1 to the inverted value
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        //Do the addition: add the inverted first value to the second value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        //export the 32 ciphertexts to a file (for the cloud); pad the unused
        //result slots with ciphertextcarry1 so the answer file always holds
        //nine 32-bit blocks
        for (int i = 0; i < 32; i++) //result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++) //2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
        delete_gate_bootstrapping_cloud_keyset(bk);
        delete_gate_bootstrapping_secret_keyset(nbitkey);
    }
    else if (int_bit == 64) {
        LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        struct timeval start, end;
        double get_time;
        printf("Doing the homomorphic computation...\n");
        gettimeofday(&start, NULL);
        //Subtraction Process
        //Step 1. Invert the 32-bit chunks of the first input value
        NOT(inverse1, ciphertext1, bk, 32);
        NOT(inverse2, ciphertext2, bk, 32);
        //initialize temp and the temporary carries to 0
        zero(temp, bk, 32);
        zero(tempcarry1, bk, 32);
        zero(tempcarry2, bk, 32);
        //Assign temp a value of 1 for the two's complement
        bootsCONSTANT(temp, 1, bk);
        //Add 1 to the inverted value
        add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk);
        //Add the rest of the inverted chunks, rippling the carry
        add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk);
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params);
        //Do the addition: add the inverted first value to the second value, (-a) + b
        add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk);
        add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk);
        gettimeofday(&end, NULL);
        get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6;
        printf("Computation Time: %lf[sec]\n", get_time);
        printf("writing the answer to file...\n");
        //export the 32 ciphertexts to a file (for the cloud); pad the unused
        //result slots with ciphertextcarry1 as above
        for (int i = 0; i < 32; i++) //result1
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params);
        for (int i = 0; i < 32; i++) //result2
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params);
        for (int i = 0; i < 32; i++) //3
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //4
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //5
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //6
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //7
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //8
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        for (int i = 0; i < 32; i++) //carry
            export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params);
        fclose(answer_data);
        delete_gate_bootstrapping_ciphertext_array(32, temp);
        delete_gate_bootstrapping_ciphertext_array(32, inverse1);
        delete_gate_bootstrapping_ciphertext_array(32, inverse2);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry1);
        delete_gate_bootstrapping_ciphertext_array(32, tempcarry2);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult1);
        delete_gate_bootstrapping_ciphertext_array(32, twosresult2);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
        delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
        delete_gate_bootstrapping_ciphertext_array(32, carry1);
        delete_gate_bootstrapping_ciphertext_array(32, carry2);
        delete_gate_bootstrapping_ciphertext_array(32, result1);
        delete_gate_bootstrapping_ciphertext_array(32, result2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
        delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } else if (int_bit == 128){ printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. 
Inverse the 32 bit chunks in the first input value NOT(inverse1, ciphertext1, bk, 32); NOT(inverse2, ciphertext2, bk, 32); NOT(inverse3, ciphertext3, bk, 32); NOT(inverse4, ciphertext4, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first inversed value to the second value,(-a) + b add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); 
delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twoscarry1); delete_gate_bootstrapping_ciphertext_array(32, twoscarry2); delete_gate_bootstrapping_ciphertext_array(32, twoscarry3); delete_gate_bootstrapping_ciphertext_array(32, twoscarry4); delete_gate_bootstrapping_ciphertext_array(32, carry1); delete_gate_bootstrapping_ciphertext_array(32, carry2); delete_gate_bootstrapping_ciphertext_array(32, carry3); delete_gate_bootstrapping_ciphertext_array(32, carry4); delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext5); delete_gate_bootstrapping_ciphertext_array(32, ciphertext6); delete_gate_bootstrapping_ciphertext_array(32, ciphertext7); delete_gate_bootstrapping_ciphertext_array(32, ciphertext8); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } else if (int_bit == 256){ printf("Doing the homomorphic computation...\n"); //do some operations on the ciphertexts: here, we will compute the //difference of the two LweSample* temp = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* inverse8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* tempcarry8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult3 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twosresult8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* twoscarry8 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //Subtraction Process //Step 1. Inverse the 32 bit chunks in the first input value NOT(inverse1, ciphertext1, bk, 32); NOT(inverse2, ciphertext2, bk, 32); NOT(inverse3, ciphertext3, bk, 32); NOT(inverse4, ciphertext4, bk, 32); NOT(inverse5, ciphertext5, bk, 32); NOT(inverse6, ciphertext6, bk, 32); NOT(inverse7, ciphertext7, bk, 32); NOT(inverse8, ciphertext8, bk, 32); //iniailize tempcarry and temp carry to 0 zero(temp, bk, 32); zero(tempcarry1, bk, 32); zero(tempcarry2, bk, 32); zero(tempcarry3, bk, 32); zero(tempcarry4, bk, 32); zero(tempcarry5, bk, 32); zero(tempcarry6, bk, 32); zero(tempcarry7, bk, 32); zero(tempcarry8, bk, 32); //Assign temp to have a value of 1 for 2nd complement bootsCONSTANT(temp, 1, bk); //Add 1 to inverted add(twosresult1, twoscarry1, inverse1, temp, tempcarry1, 32, bk); //Add the rest of the inverted add(twosresult2, twoscarry2, inverse2, tempcarry2, twoscarry1, 32, bk); add(twosresult3, twoscarry3, inverse3, tempcarry3, twoscarry2, 32, bk); add(twosresult4, twoscarry4, inverse4, tempcarry4, twoscarry3, 32, bk); add(twosresult5, twoscarry5, inverse5, tempcarry5, twoscarry4, 32, bk); add(twosresult6, twoscarry6, inverse6, tempcarry6, twoscarry5, 32, bk); add(twosresult7, twoscarry7, inverse7, tempcarry7, twoscarry6, 32, bk); add(twosresult8, twoscarry8, inverse8, tempcarry8, twoscarry7, 32, bk); LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry7 = 
new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carry8 = new_gate_bootstrapping_ciphertext_array(32, params); //Do the addition, this is basically adding the first inversed value to the second value, (-a) + b add(result1, carry1, ciphertext9, twosresult1, ciphertextcarry1, 32, bk); add(result2, carry2, ciphertext10, twosresult2, carry1, 32, bk); add(result3, carry3, ciphertext11, twosresult3, carry2, 32, bk); add(result4, carry4, ciphertext12, twosresult4, carry3, 32, bk); add(result5, carry5, ciphertext13, twosresult5, carry4, 32, bk); add(result6, carry6, ciphertext14, twosresult6, carry5, 32, bk); add(result7, carry7, ciphertext15, twosresult7, carry6, 32, bk); add(result8, carry8, ciphertext16, twosresult8, carry7, 32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); printf("Writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // 1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result4[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result6[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result7[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result8[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); //clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, temp); delete_gate_bootstrapping_ciphertext_array(32, inverse1); delete_gate_bootstrapping_ciphertext_array(32, inverse2); delete_gate_bootstrapping_ciphertext_array(32, inverse3); delete_gate_bootstrapping_ciphertext_array(32, inverse4); delete_gate_bootstrapping_ciphertext_array(32, inverse5); delete_gate_bootstrapping_ciphertext_array(32, inverse6); delete_gate_bootstrapping_ciphertext_array(32, inverse7); delete_gate_bootstrapping_ciphertext_array(32, inverse8); delete_gate_bootstrapping_ciphertext_array(32, tempcarry1); delete_gate_bootstrapping_ciphertext_array(32, tempcarry2); delete_gate_bootstrapping_ciphertext_array(32, tempcarry3); delete_gate_bootstrapping_ciphertext_array(32, tempcarry4); delete_gate_bootstrapping_ciphertext_array(32, tempcarry5); delete_gate_bootstrapping_ciphertext_array(32, tempcarry6); delete_gate_bootstrapping_ciphertext_array(32, tempcarry7); delete_gate_bootstrapping_ciphertext_array(32, tempcarry8); delete_gate_bootstrapping_ciphertext_array(32, twosresult1); delete_gate_bootstrapping_ciphertext_array(32, twosresult2); delete_gate_bootstrapping_ciphertext_array(32, twosresult3); delete_gate_bootstrapping_ciphertext_array(32, twosresult4); delete_gate_bootstrapping_ciphertext_array(32, twosresult5); delete_gate_bootstrapping_ciphertext_array(32, twosresult6); delete_gate_bootstrapping_ciphertext_array(32, twosresult7); delete_gate_bootstrapping_ciphertext_array(32, twosresult8); 
delete_gate_bootstrapping_ciphertext_array(32, twoscarry1);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry2);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry3);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry4);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry5);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry6);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry7);
delete_gate_bootstrapping_ciphertext_array(32, twoscarry8);
delete_gate_bootstrapping_ciphertext_array(32, carry1);
delete_gate_bootstrapping_ciphertext_array(32, carry2);
delete_gate_bootstrapping_ciphertext_array(32, carry3);
delete_gate_bootstrapping_ciphertext_array(32, carry4);
delete_gate_bootstrapping_ciphertext_array(32, carry5);
delete_gate_bootstrapping_ciphertext_array(32, carry6);
delete_gate_bootstrapping_ciphertext_array(32, carry7);
delete_gate_bootstrapping_ciphertext_array(32, carry8);
delete_gate_bootstrapping_ciphertext_array(32, result1);
delete_gate_bootstrapping_ciphertext_array(32, result2);
delete_gate_bootstrapping_ciphertext_array(32, result3);
delete_gate_bootstrapping_ciphertext_array(32, result4);
delete_gate_bootstrapping_ciphertext_array(32, result5);
delete_gate_bootstrapping_ciphertext_array(32, result6);
delete_gate_bootstrapping_ciphertext_array(32, result7);
delete_gate_bootstrapping_ciphertext_array(32, result8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext1);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext2);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext3);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext4);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext5);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext6);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext7);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext8);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext9);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext10);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext11);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext12);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext13);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext14);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext15);
delete_gate_bootstrapping_ciphertext_array(32, ciphertext16);
delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1);
delete_gate_bootstrapping_cloud_keyset(bk);
delete_gate_bootstrapping_secret_keyset(nbitkey);
}
}
}
// If Multiplication
else if (int_op == 4) {
    std::cout << int_bit << " bit Multiplication computation" << "\n";
    if (int_bit == 128) {
        printf("Doing the homomorphic computation...\n");
        // do some operations on the ciphertexts: here, we will compute the
        // product of the two
        LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result7 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result8 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result9 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result10 = new_gate_bootstrapping_ciphertext_array(32, params);
        LweSample* result11 = new_gate_bootstrapping_ciphertext_array(32, params);
LweSample* result12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result16 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result17 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result18 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result19 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result20 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* sum15 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover7 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover8 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover9 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover10 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover11 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover12 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover13 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover14 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* carryover15 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul128(result1, result2, result3, result4, result5, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext9,ciphertextcarry1, 32, bk); //result2 mul128(result6, result7, result8, result9, result10, ciphertext1, ciphertext2, ciphertext3, ciphertext4, ciphertext10,ciphertextcarry1, 32, bk); //result3 mul128(result11, result12, result13, result14, result15, ciphertext1, ciphertext2,ciphertext3,ciphertext4,ciphertext11,ciphertextcarry1, 32, bk); //result4 mul128(result16,result17, result18,result19,result20, ciphertext1, 
ciphertext2,ciphertext3,ciphertext4,ciphertext12,ciphertextcarry1, 32, bk); add(sum1, carryover1, result10, result4, ciphertextcarry1, 32, bk); add(sum2, carryover2, result9, result3,carryover1,32, bk); add(sum3, carryover3, result8, result2,carryover2,32, bk); add(sum4, carryover4, result7, result1,carryover3,32, bk); add(sum5, carryover5, result6, ciphertextcarry1,carryover4,32, bk); add(sum6, carryover6, sum2, result15,carryover5,32, bk); add(sum7, carryover7, sum3, result14,carryover6,32, bk); add(sum8, carryover8, sum4, result13,carryover7,32, bk); add(sum9, carryover9, sum5, result12,carryover8,32, bk); add(sum10, carryover10, result11, ciphertextcarry1,carryover9,32, bk); add(sum11, carryover11, sum7, result20,carryover10,32, bk); add(sum12, carryover12, sum8, result19,carryover11,32, bk); add(sum13, carryover13, sum9, result18,carryover12,32, bk); add(sum14, carryover14, sum10, result17,carryover13,32, bk); add(sum15, carryover15, result16 , ciphertextcarry1,carryover14,32, bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result5[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum1[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum6[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum11[i], params); for (int i=0; i<32; i++) // result5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum12[i], params); for (int i=0; i<32; i++) // result6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum13[i], params); for (int i=0; i<32; i++) // result7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum14[i], params); for (int i=0; i<32; i++) // result8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &sum15[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, result7); delete_gate_bootstrapping_ciphertext_array(32, result8); delete_gate_bootstrapping_ciphertext_array(32, result9); delete_gate_bootstrapping_ciphertext_array(32, result10); delete_gate_bootstrapping_ciphertext_array(32, result11); delete_gate_bootstrapping_ciphertext_array(32, result12); delete_gate_bootstrapping_ciphertext_array(32, result13); delete_gate_bootstrapping_ciphertext_array(32, result14); delete_gate_bootstrapping_ciphertext_array(32, result15); delete_gate_bootstrapping_ciphertext_array(32, result16); delete_gate_bootstrapping_ciphertext_array(32, result17); delete_gate_bootstrapping_ciphertext_array(32, result18); delete_gate_bootstrapping_ciphertext_array(32, result19); 
delete_gate_bootstrapping_ciphertext_array(32, result20); delete_gate_bootstrapping_ciphertext_array(32, sum1); delete_gate_bootstrapping_ciphertext_array(32, sum2); delete_gate_bootstrapping_ciphertext_array(32, sum3); delete_gate_bootstrapping_ciphertext_array(32, sum4); delete_gate_bootstrapping_ciphertext_array(32, sum5); delete_gate_bootstrapping_ciphertext_array(32, sum6); delete_gate_bootstrapping_ciphertext_array(32, sum7); delete_gate_bootstrapping_ciphertext_array(32, sum8); delete_gate_bootstrapping_ciphertext_array(32, sum9); delete_gate_bootstrapping_ciphertext_array(32, sum10); delete_gate_bootstrapping_ciphertext_array(32, sum11); delete_gate_bootstrapping_ciphertext_array(32, sum12); delete_gate_bootstrapping_ciphertext_array(32, sum13); delete_gate_bootstrapping_ciphertext_array(32, sum14); delete_gate_bootstrapping_ciphertext_array(32, sum15); delete_gate_bootstrapping_ciphertext_array(32, carryover1); delete_gate_bootstrapping_ciphertext_array(32, carryover2); delete_gate_bootstrapping_ciphertext_array(32, carryover3); delete_gate_bootstrapping_ciphertext_array(32, carryover4); delete_gate_bootstrapping_ciphertext_array(32, carryover5); delete_gate_bootstrapping_ciphertext_array(32, carryover6); delete_gate_bootstrapping_ciphertext_array(32, carryover7); delete_gate_bootstrapping_ciphertext_array(32, carryover8); delete_gate_bootstrapping_ciphertext_array(32, carryover9); delete_gate_bootstrapping_ciphertext_array(32, carryover10); delete_gate_bootstrapping_ciphertext_array(32, carryover11); delete_gate_bootstrapping_ciphertext_array(32, carryover12); delete_gate_bootstrapping_ciphertext_array(32, carryover13); delete_gate_bootstrapping_ciphertext_array(32, carryover14); delete_gate_bootstrapping_ciphertext_array(32, carryover15); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext3); delete_gate_bootstrapping_ciphertext_array(32, ciphertext4); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertext11); delete_gate_bootstrapping_ciphertext_array(32, ciphertext12); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } else if (int_bit == 64){ printf("Doing the homomorphic computation...\n"); // do some operations on the ciphertexts: here, we will compute the // product of the two LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result3 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result4 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result5 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result6 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult2 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* finalresult3 = new_gate_bootstrapping_ciphertext_array(32, params); struct 
timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul64(result1,result2, result3, ciphertext1, ciphertext2,ciphertext9,ciphertextcarry1, 32, bk); //result2 mul64(result4,result5, result6, ciphertext1, ciphertext2,ciphertext10,ciphertextcarry1, 32, bk); split(finalresult,finalresult2, finalresult3, result1, result2,result4,result5,result6,ciphertextcarry1,32,bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result3[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult3[i], params); for (int i=0; i<32; i++) // result3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult2[i], params); for (int i=0; i<32; i++) // result4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &finalresult[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, result3); delete_gate_bootstrapping_ciphertext_array(32, result4); delete_gate_bootstrapping_ciphertext_array(32, result5); delete_gate_bootstrapping_ciphertext_array(32, result6); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertext10); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_ciphertext_array(32, finalresult); delete_gate_bootstrapping_ciphertext_array(32, finalresult2); delete_gate_bootstrapping_ciphertext_array(32, finalresult3); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } else if (int_bit == 32){ printf("Doing the homomorphic computation...\n"); // do some operations on the ciphertexts: here, we will compute the // product of the two LweSample* result1 = new_gate_bootstrapping_ciphertext_array(32, params); LweSample* result2 = new_gate_bootstrapping_ciphertext_array(32, params); struct timeval start, end; double get_time; gettimeofday(&start, NULL); //result1 mul32(result1,result2,ciphertext1, ciphertext9,ciphertextcarry1, 32, 
bk); gettimeofday(&end, NULL); get_time = (end.tv_sec - start.tv_sec) + (end.tv_usec - start.tv_usec) * 1.0E-6; printf("Computation Time: %lf[sec]\n", get_time); // write computation time to file FILE *t_file; t_file = fopen(T_FILE, "a"); fprintf(t_file, "%lf\n", get_time); fclose(t_file); printf("writing the answer to file...\n"); //export the 32 ciphertexts to a file (for the cloud) for (int i=0; i<32; i++) // result1 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result2[i], params); for (int i=0; i<32; i++) // result2 export_gate_bootstrapping_ciphertext_toFile(answer_data, &result1[i], params); for (int i=0; i<32; i++) // 3 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 4 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 5 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 6 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 7 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // 8 export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); for (int i=0; i<32; i++) // carry export_gate_bootstrapping_ciphertext_toFile(answer_data, &ciphertextcarry1[i], params); fclose(answer_data); // clean up all pointers delete_gate_bootstrapping_ciphertext_array(32, result1); delete_gate_bootstrapping_ciphertext_array(32, result2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative1); delete_gate_bootstrapping_ciphertext_array(32, ciphertextbit2); delete_gate_bootstrapping_ciphertext_array(32, ciphertextnegative2); delete_gate_bootstrapping_ciphertext_array(32, ciphertext1); delete_gate_bootstrapping_ciphertext_array(32, ciphertext9); delete_gate_bootstrapping_ciphertext_array(32, ciphertextcarry1); delete_gate_bootstrapping_cloud_keyset(bk); delete_gate_bootstrapping_secret_keyset(nbitkey); } } }
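Editor's note: every subtraction branch above repeats the same two's-complement recipe once per 32-bit limb (invert, ripple a +1 through the limbs, then add to the other operand), differing only in how many limbs it unrolls. The sketch below is a hedged illustration of how the 32/64/128/256-bit branches could collapse into one loop; it is not part of the original program. It assumes the NOT/zero/add helpers keep exactly the signatures used above, and sub_limbs, diff, a, b and nlimbs are the editor's own names.

// Illustrative sketch only -- assumes the NOT/zero/add helpers defined
// elsewhere in this program, with the signatures used in the code above.
// a, b and diff are arrays of nlimbs 32-bit ciphertext blocks,
// least-significant limb first; nlimbs is 1, 2, 4 or 8.
static void sub_limbs(LweSample** diff, LweSample* const* a, LweSample* const* b,
                      int nlimbs,
                      const TFheGateBootstrappingCloudKeySet* bk,
                      const TFheGateBootstrappingParameterSet* params)
{
    LweSample* one    = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* zero32 = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* inv    = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* cin    = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* cout   = new_gate_bootstrapping_ciphertext_array(32, params);
    LweSample* twos[8];
    zero(one, bk, 32);
    bootsCONSTANT(one, 1, bk);      // the +1 of the two's complement
    zero(zero32, bk, 32);
    zero(cin, bk, 32);
    // pass 1: twos[k] = ~b[k] (+1 into the lowest limb), carry rippling up
    for (int k = 0; k < nlimbs; k++) {
        twos[k] = new_gate_bootstrapping_ciphertext_array(32, params);
        NOT(inv, b[k], bk, 32);
        add(twos[k], cout, inv, (k == 0) ? one : zero32, cin, 32, bk);
        LweSample* t = cin; cin = cout; cout = t;  // carry out feeds next carry in
    }
    // pass 2: diff[k] = a[k] + twos[k], i.e. a + (-b), same ripple scheme
    zero(cin, bk, 32);
    for (int k = 0; k < nlimbs; k++) {
        add(diff[k], cout, a[k], twos[k], cin, 32, bk);
        LweSample* t = cin; cin = cout; cout = t;
    }
    for (int k = 0; k < nlimbs; k++)
        delete_gate_bootstrapping_ciphertext_array(32, twos[k]);
    delete_gate_bootstrapping_ciphertext_array(32, one);
    delete_gate_bootstrapping_ciphertext_array(32, zero32);
    delete_gate_bootstrapping_ciphertext_array(32, inv);
    delete_gate_bootstrapping_ciphertext_array(32, cin);
    delete_gate_bootstrapping_ciphertext_array(32, cout);
}

With a helper like this, each bit-width branch reduces to allocating the limb arrays, calling sub_limbs, and exporting and freeing the blocks in a loop, which would also have prevented the missed carry deletions fixed above.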
queue.h
// -*- C++ -*- // Copyright (C) 2007-2014 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. */ template<typename _Tp> class _RestrictedBoundedConcurrentQueue { private: /** @brief Array of elements, seen as cyclic buffer. */ _Tp* _M_base; /** @brief Maximal number of elements contained at the same time. */ _SequenceIndex _M_max_size; /** @brief Cyclic __begin and __end pointers contained in one atomically changeable value. */ _GLIBCXX_VOLATILE _CASable _M_borders; public: /** @brief Constructor. Not to be called concurrent, of course. * @param __max_size Maximal number of elements to be contained. */ _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size) { _M_max_size = __max_size; _M_base = new _Tp[__max_size]; _M_borders = __encode2(0, 0); #pragma omp flush } /** @brief Destructor. Not to be called concurrent, of course. */ ~_RestrictedBoundedConcurrentQueue() { delete[] _M_base; } /** @brief Pushes one element into the queue at the front end. * Must not be called concurrently with pop_front(). */ void push_front(const _Tp& __t) { _CASable __former_borders = _M_borders; int __former_front, __former_back; __decode2(__former_borders, __former_front, __former_back); *(_M_base + __former_front % _M_max_size) = __t; #if _GLIBCXX_ASSERTIONS // Otherwise: front - back > _M_max_size eventually. _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back) <= _M_max_size); #endif __fetch_and_add(&_M_borders, __encode2(1, 0)); } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). 
*/ bool pop_front(_Tp& __t) { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front - 1, __former_back); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + (__former_front - 1) % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). */ bool pop_back(_Tp& __t) //queue behavior { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front, __former_back + 1); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + __former_back % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } }; } //namespace __gnu_parallel #undef _GLIBCXX_VOLATILE #endif /* _GLIBCXX_PARALLEL_QUEUE_H */
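Editor's note: the lock-freedom of the queue above rests on __encode2/__decode2 (provided by the parallel-mode headers included at the top of the file) packing both cyclic borders into a single _CASable word, so a pop can claim an element with one compare-and-swap and simply retry when it loses the race. Below is a minimal standalone analogue of that pop_back loop using std::atomic<uint64_t>; encode2, decode2 and try_pop_back are the editor's stand-ins, not the library's primitives.

#include <atomic>
#include <cstdint>

// Pack two 32-bit cyclic borders (front, back) into one CAS-able word,
// mirroring what __encode2/__decode2 do for _M_borders above.
static inline uint64_t encode2(uint32_t front, uint32_t back)
{ return (uint64_t(front) << 32) | back; }

static inline void decode2(uint64_t borders, uint32_t& front, uint32_t& back)
{ front = uint32_t(borders >> 32); back = uint32_t(borders); }

// pop_back-style claim: retry the CAS until the element is won or the
// deque is observed empty (front == back).
template <typename Tp>
bool try_pop_back(std::atomic<uint64_t>& borders, Tp* base,
                  uint64_t max_size, Tp& out)
{
    uint64_t old = borders.load();
    uint32_t front, back;
    decode2(old, front, back);
    while (front > back) {
        if (borders.compare_exchange_weak(old, encode2(front, back + 1))) {
            out = base[back % max_size];   // element claimed by the CAS
            return true;
        }
        decode2(old, front, back);         // CAS failed: old was refreshed
    }
    return false;
}

As in the class above, the single-word CAS is what makes the two ends independent: a successful swap moves exactly one border, and any concurrent change to either border invalidates the expected value and forces a retry.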
// -*-C++ - *- //Copyright(C) 2007 - 2014 Free Software Foundation, Inc. // //This file is part of the GNU ISO C++ Library.This library is free // software; you can redistribute it and / or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or(at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; //see the files COPYING3 and COPYING.RUNTIME respectively.If not, see // <http: //www.gnu.org / licenses / >. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. */ template < typename _Tp > class _RestrictedBoundedConcurrentQueue { private: /** @brief Array of elements, seen as cyclic buffer. */ _Tp * _M_base; /** @brief Maximal number of elements contained at the same time. */ _SequenceIndex _M_max_size; /** @brief Cyclic __begin and __end pointers contained in one atomically changeable value. */ _GLIBCXX_VOLATILE _CASable _M_borders; public: /** @brief Constructor. Not to be called concurrent, of course. * @param __max_size Maximal number of elements to be contained. */ _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size) { _M_max_size = __max_size; _M_base = new _Tp[__max_size]; _M_borders = __encode2(0, 0); } /** @brief Destructor. Not to be called concurrent, of course. */ ~_RestrictedBoundedConcurrentQueue() { delete[] _M_base; } /** @brief Pushes one element into the queue at the front end. * Must not be called concurrently with pop_front(). */ void push_front(const _Tp & __t) { _CASable __former_borders = _M_borders; int __former_front, __former_back; __decode2(__former_borders, __former_front, __former_back); *(_M_base + __former_front % _M_max_size) = __t; #if _GLIBCXX_ASSERTIONS //Otherwise: front - back > _M_max_size eventually. _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back) <= _M_max_size); #endif __fetch_and_add(&_M_borders, __encode2(1, 0)); } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with pop_front(). */ bool pop_front(_Tp & __t) { int __former_front, __former_back; __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { //Chance. 
_CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front - 1, __former_back); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + (__former_front - 1) % _M_max_size); return true; } __decode2(_M_borders, __former_front, __former_back); } return false; } /** @brief Pops one element from the queue at the back end. * May be called concurrently with push_front() and pop_front(). */ bool pop_back(_Tp & __t) // queue behavior { int __former_front, __former_back; __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front, __former_back + 1); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + __former_back % _M_max_size); return true; } __decode2(_M_borders, __former_front, __former_back); } return false; } }; } // namespace __gnu_parallel #undef _GLIBCXX_VOLATILE #endif /* _GLIBCXX_PARALLEL_QUEUE_H */
// -*- C++ -*- // Copyright (C) 2007-2014 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variables volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /** @brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently with each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param _Tp Contained element type. */ template < typename _Tp > class _RestrictedBoundedConcurrentQueue { private: /** @brief Array of elements, seen as cyclic buffer. */ _Tp * _M_base; /** @brief Maximal number of elements contained at the same time. */ _SequenceIndex _M_max_size; /** @brief Cyclic __begin and __end pointers contained in one atomically changeable value. */ _GLIBCXX_VOLATILE _CASable _M_borders; public: /** @brief Constructor. Not to be called concurrently, of course. * @param __max_size Maximal number of elements to be contained. */ _RestrictedBoundedConcurrentQueue(_SequenceIndex __max_size) { _M_max_size = __max_size; _M_base = new _Tp[__max_size]; _M_borders = __encode2(0, 0); #pragma omp flush } /** @brief Destructor. Not to be called concurrently, of course. */ ~_RestrictedBoundedConcurrentQueue() { delete[] _M_base; } /** @brief Pushes one element into the queue at the front end. * Must not be called concurrently with pop_front(). */ void push_front(const _Tp & __t) { _CASable __former_borders = _M_borders; int __former_front, __former_back; __decode2(__former_borders, __former_front, __former_back); *(_M_base + __former_front % _M_max_size) = __t; #if _GLIBCXX_ASSERTIONS // Otherwise: front - back > _M_max_size eventually. _GLIBCXX_PARALLEL_ASSERT(((__former_front + 1) - __former_back) <= _M_max_size); #endif __fetch_and_add(&_M_borders, __encode2(1, 0)); } /** @brief Pops one element from the queue at the front end. * Must not be called concurrently with push_front().
*/ bool pop_front(_Tp & __t) { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front - 1, __former_back); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + (__former_front - 1) % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } /** @brief Pops one element from the queue at the back end. * May be called concurrently with push_front() and pop_front(). */ bool pop_back(_Tp & __t) // queue behavior { int __former_front, __former_back; #pragma omp flush __decode2(_M_borders, __former_front, __former_back); while (__former_front > __former_back) { // Chance. _CASable __former_borders = __encode2(__former_front, __former_back); _CASable __new_borders = __encode2(__former_front, __former_back + 1); if (__compare_and_swap(&_M_borders, __former_borders, __new_borders)) { __t = *(_M_base + __former_back % _M_max_size); return true; } #pragma omp flush __decode2(_M_borders, __former_front, __former_back); } return false; } }; } // namespace __gnu_parallel #undef _GLIBCXX_VOLATILE #endif /* _GLIBCXX_PARALLEL_QUEUE_H */
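Taken together, the class documentation describes the classic work-stealing discipline: a single owner thread treats the front end as a private LIFO stack, while any number of thieves may take from the back end concurrently. A minimal usage sketch, assuming only what the comments in queue.h guarantee (the header is internal to libstdc++, so this is illustrative, not a supported API):

#include <parallel/queue.h>  // internal GNU parallel header

void work_stealing_sketch()
{
    __gnu_parallel::_RestrictedBoundedConcurrentQueue<int> queue(128);
    queue.push_front(42);            // owner thread only
    int job;
    if (queue.pop_front(job))        // owner thread only; newest item
        { /* run job locally */ }
    if (queue.pop_back(job))         // any thread, at any time; oldest item
        { /* job stolen by another thread */ }
}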
initAtoms.c
/// \file /// Initialize the atom configuration. #include "initAtoms.h" #include <math.h> #include <assert.h> #include "constants.h" #include "decomposition.h" #include "parallel.h" #include "random.h" #include "linkCells.h" #include "timestep.h" #include "memUtils.h" #include "performanceTimers.h" static void computeVcm(SimFlat* s, real_t vcm[3]); /// \details /// Call functions such as createFccLattice and setTemperature to set up /// initial atom positions and momenta. Atoms* initAtoms(LinkCell* boxes) { Atoms* atoms = comdMalloc(sizeof(Atoms)); int maxTotalAtoms = MAXATOMS*boxes->nTotalBoxes; //#pragma sst delete { atoms->gid = (int*) comdMalloc(maxTotalAtoms*sizeof(int)); atoms->iSpecies = (int*) comdMalloc(maxTotalAtoms*sizeof(int)); atoms->r = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3)); atoms->p = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3)); atoms->f = (real3*) comdMalloc(maxTotalAtoms*sizeof(real3)); atoms->U = (real_t*)comdMalloc(maxTotalAtoms*sizeof(real_t)); } atoms->nLocal = 0; atoms->nGlobal = 0; #pragma sst compute for (int iOff = 0; iOff < maxTotalAtoms; iOff++) { atoms->gid[iOff] = 0; atoms->iSpecies[iOff] = 0; zeroReal3(atoms->r[iOff]); zeroReal3(atoms->p[iOff]); zeroReal3(atoms->f[iOff]); atoms->U[iOff] = 0.; } return atoms; } void destroyAtoms(Atoms *atoms) { freeMe(atoms,gid); freeMe(atoms,iSpecies); freeMe(atoms,r); freeMe(atoms,p); freeMe(atoms,f); freeMe(atoms,U); comdFree(atoms); } /// Creates atom positions on a face centered cubic (FCC) lattice with /// nx * ny * nz unit cells and lattice constant lat. /// Set momenta to zero. void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat* s) { const real_t* localMin = s->domain->localMin; // alias const real_t* localMax = s->domain->localMax; // alias int nb = 4; // number of atoms in the basis real3 basis[4] = { {0.25, 0.25, 0.25}, {0.25, 0.75, 0.75}, {0.75, 0.25, 0.75}, {0.75, 0.75, 0.25} }; // create and place atoms int begin[3]; int end[3]; for (int ii=0; ii<3; ++ii) { begin[ii] = floor(localMin[ii]/lat); end[ii] = ceil (localMax[ii]/lat); } real_t px,py,pz; px=py=pz=0.0; #pragma sst compute for (int ix=begin[0]; ix<end[0]; ++ix) for (int iy=begin[1]; iy<end[1]; ++iy) for (int iz=begin[2]; iz<end[2]; ++iz) for (int ib=0; ib<nb; ++ib) { real_t rx = (ix+basis[ib][0]) * lat; real_t ry = (iy+basis[ib][1]) * lat; real_t rz = (iz+basis[ib][2]) * lat; if (rx < localMin[0] || rx >= localMax[0]) continue; if (ry < localMin[1] || ry >= localMax[1]) continue; if (rz < localMin[2] || rz >= localMax[2]) continue; int id = ib+nb*(iz+nz*(iy+ny*(ix))); putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz); } #pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz)) s->atoms->nGlobal = 0; if (getMyRank() == 0) printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n", nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal); #pragma sst init s->atoms->nGlobal / getNRanks() s->atoms->nLocal = s->atoms->nLocal; s->boxes->nTotalAtoms = s->atoms->nLocal; // set total atoms in simulation startTimer(commReduceTimer); addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1); stopTimer(commReduceTimer); #pragma sst delete assert(s->atoms->nGlobal == nb*nx*ny*nz); } /// Sets the center of mass velocity of the system. /// \param [in] newVcm The desired center of mass velocity. 
void setVcm(SimFlat* s, real_t newVcm[3]) { real_t oldVcm[3]; computeVcm(s, oldVcm); real_t vShift[3]; vShift[0] = (newVcm[0] - oldVcm[0]); vShift[1] = (newVcm[1] - oldVcm[1]); vShift[2] = (newVcm[2] - oldVcm[2]); int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; #pragma omp parallel for for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iSpecies = s->atoms->iSpecies[iOff]; real_t mass = s->species[iSpecies].mass; s->atoms->p[iOff][0] += mass * vShift[0]; s->atoms->p[iOff][1] += mass * vShift[1]; s->atoms->p[iOff][2] += mass * vShift[2]; } } } /// Sets the temperature of the system. /// /// Selects atom velocities randomly from a Boltzmann (equilibrium) /// distribution that corresponds to the specified temperature. This /// random process will typically result in a small, but non-zero center /// of mass velocity and a small difference from the specified /// temperature. For typical MD runs these small differences are /// unimportant. However, to avoid possible confusion, we set the center /// of mass velocity to zero and scale the velocities to exactly match /// the input temperature. void setTemperature(SimFlat* s, real_t temperature) { s->initialTemp = temperature; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; // set initial velocities for the distribution #pragma omp parallel for for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iType = s->atoms->iSpecies[iOff]; real_t mass = s->species[iType].mass; real_t sigma = sqrt(kB_eV * temperature/mass); uint64_t seed = mkSeed(s->atoms->gid[iOff], 123); s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed); } } // compute the resulting temperature // kinetic energy = 3/2 kB * Temperature if (temperature == 0.0) return; real_t vZero[3] = {0., 0., 0.}; setVcm(s, vZero); kineticEnergy(s); real_t temp = (s->eKinetic/s->atoms->nGlobal)/kB_eV/1.5; // scale the velocities to achieve the target temperature real_t scaleFactor = sqrt(temperature/temp); #pragma omp parallel for for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff) { s->atoms->p[iOff][0] *= scaleFactor; s->atoms->p[iOff][1] *= scaleFactor; s->atoms->p[iOff][2] *= scaleFactor; } } kineticEnergy(s); temp = s->eKinetic/s->atoms->nGlobal/kB_eV/1.5; } /// Add a random displacement to the atom positions. /// Atoms are displaced by a random distance in the range /// [-delta, +delta] along each axis. /// \param [in] delta The maximum displacement (along each axis). void randomDisplacements(SimFlat* s, real_t delta) { int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; #pragma omp parallel for for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff) { uint64_t seed = mkSeed(s->atoms->gid[iOff], 457); s->atoms->r[iOff][0] += (2.0*lcg61(&seed)-1.0) * delta; s->atoms->r[iOff][1] += (2.0*lcg61(&seed)-1.0) * delta; s->atoms->r[iOff][2] += (2.0*lcg61(&seed)-1.0) * delta; } } } /// Computes the center of mass velocity of the system.
void computeVcm(SimFlat* s, real_t vcm[3]) { real_t vcmLocal[4] = {0., 0., 0., 0.}; real_t vcmSum[4] = {0., 0., 0., 0.}; real_t v0 = 0.0; real_t v1 = 0.0; real_t v2 = 0.0; real_t v3 = 0.0; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; // sum the momenta and particle masses #pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2) reduction(+:v3) for (int iBox=0; iBox<s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff=MAXATOMS*iBox, ii=0; ii<s->boxes->nAtoms[iBox]; ++ii, ++iOff) { v0 += s->atoms->p[iOff][0]; v1 += s->atoms->p[iOff][1]; v2 += s->atoms->p[iOff][2]; int iSpecies = s->atoms->iSpecies[iOff]; v3 += s->species[iSpecies].mass; } } vcmLocal[0] = v0; vcmLocal[1] = v1; vcmLocal[2] = v2; vcmLocal[3] = v3; startTimer(commReduceTimer); addRealParallel(vcmLocal, vcmSum, 4); stopTimer(commReduceTimer); real_t totalMass = vcmSum[3]; vcm[0] = vcmSum[0]/totalMass; vcm[1] = vcmSum[1]/totalMass; vcm[2] = vcmSum[2]/totalMass; }
/// \file /// Initialize the atom configuration. #include "initAtoms.h" #include <math.h> #include <assert.h> #include "constants.h" #include "decomposition.h" #include "parallel.h" #include "random.h" #include "linkCells.h" #include "timestep.h" #include "memUtils.h" #include "performanceTimers.h" static void computeVcm(SimFlat * s, real_t vcm[3]); ///\details /// Call functions such as createFccLattice and setTemperature to set up /// initial atom positions and momenta. Atoms * initAtoms(LinkCell * boxes) { Atoms *atoms = comdMalloc(sizeof(Atoms)); int maxTotalAtoms = MAXATOMS * boxes->nTotalBoxes; // #pragma sst delete { atoms->gid = (int *)comdMalloc(maxTotalAtoms * sizeof(int)); atoms->iSpecies = (int *)comdMalloc(maxTotalAtoms * sizeof(int)); atoms->r = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->p = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->f = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->U = (real_t *) comdMalloc(maxTotalAtoms * sizeof(real_t)); } atoms->nLocal = 0; atoms->nGlobal = 0; #pragma sst compute for (int iOff = 0; iOff < maxTotalAtoms; iOff++) { atoms->gid[iOff] = 0; atoms->iSpecies[iOff] = 0; zeroReal3(atoms->r[iOff]); zeroReal3(atoms->p[iOff]); zeroReal3(atoms->f[iOff]); atoms->U[iOff] = 0.; } return atoms; } void destroyAtoms(Atoms * atoms) { freeMe(atoms, gid); freeMe(atoms, iSpecies); freeMe(atoms, r); freeMe(atoms, p); freeMe(atoms, f); freeMe(atoms, U); comdFree(atoms); } ///Creates atom positions on a face centered cubic(FCC) lattice with /// nx * ny * nz unit cells and lattice constant lat. /// Set momenta to zero. void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat * s) { const real_t *localMin = s->domain->localMin; //alias const real_t *localMax = s->domain->localMax; //alias int nb = 4; //number of atoms in the basis real3 basis[4] = { { 0.25, 0.25, 0.25 }, { 0.25, 0.75, 0.75 }, { 0.75, 0.25, 0.75 }, { 0.75, 0.75, 0.25 } }; //create and place atoms int begin[3]; int end[3]; for (int ii = 0; ii < 3; ++ii) { begin[ii] = floor(localMin[ii] / lat); end[ii] = ceil(localMax[ii] / lat); } real_t px, py, pz; px = py = pz = 0.0; #pragma sst compute for (int ix = begin[0]; ix < end[0]; ++ix) for (int iy = begin[1]; iy < end[1]; ++iy) for (int iz = begin[2]; iz < end[2]; ++iz) for (int ib = 0; ib < nb; ++ib) { real_t rx = (ix + basis[ib][0]) * lat; real_t ry = (iy + basis[ib][1]) * lat; real_t rz = (iz + basis[ib][2]) * lat; if (rx < localMin[0] || rx >= localMax[0]) continue; if (ry < localMin[1] || ry >= localMax[1]) continue; if (rz < localMin[2] || rz >= localMax[2]) continue; int id = ib + nb * (iz + nz * (iy + ny * (ix))); putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz); } #pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz)) s->atoms->nGlobal = 0; if (getMyRank() == 0) printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n", nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal); #pragma sst init s->atoms->nGlobal / getNRanks() s->atoms->nLocal = s->atoms->nLocal; s->boxes->nTotalAtoms = s->atoms->nLocal; //set total atoms in simulation startTimer(commReduceTimer); addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1); stopTimer(commReduceTimer); #pragma sst delete assert(s->atoms->nGlobal == nb * nx * ny * nz); } ///Sets the center of mass velocity of the system. /// \param[in] newVcm The desired center of mass velocity. 
void setVcm(SimFlat * s, real_t newVcm[3]) { real_t oldVcm[3]; computeVcm(s, oldVcm); real_t vShift[3]; vShift[0] = (newVcm[0] - oldVcm[0]); vShift[1] = (newVcm[1] - oldVcm[1]); vShift[2] = (newVcm[2] - oldVcm[2]); int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iSpecies = s->atoms->iSpecies[iOff]; real_t mass = s->species[iSpecies].mass; s->atoms->p[iOff][0] += mass * vShift[0]; s->atoms->p[iOff][1] += mass * vShift[1]; s->atoms->p[iOff][2] += mass * vShift[2]; } } } ///Sets the temperature of system. /// ///Selects atom velocities randomly from a boltzmann(equilibrium) /// distribution that corresponds to the specified temperature.This /// random process will typically result in a small, but non zero center /// of mass velocity and a small difference from the specified /// temperature.For typical MD runs these small differences are /// unimportant, However, to avoid possible confusion, we set the center /// of mass velocity to zero and scale the velocities to exactly match /// the input temperature. void setTemperature(SimFlat * s, real_t temperature) { s->initialTemp = temperature; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; //set initial velocities for the distribution for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iType = s->atoms->iSpecies[iOff]; real_t mass = s->species[iType].mass; real_t sigma = sqrt(kB_eV * temperature / mass); uint64_t seed = mkSeed(s->atoms->gid[iOff], 123); s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed); } } //compute the resulting temperature // kinetic energy = 3 / 2 kB * Temperature if (temperature == 0.0) return; real_t vZero[3] = {0., 0., 0.}; setVcm(s, vZero); kineticEnergy(s); real_t temp = (s->eKinetic / s->atoms->nGlobal) / kB_eV / 1.5; //scale the velocities to achieve the target temperature real_t scaleFactor = sqrt(temperature / temp); for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { s->atoms->p[iOff][0] *= scaleFactor; s->atoms->p[iOff][1] *= scaleFactor; s->atoms->p[iOff][2] *= scaleFactor; } } kineticEnergy(s); temp = s->eKinetic / s->atoms->nGlobal / kB_eV / 1.5; } ///Add a random displacement to the atom positions. /// Atoms are displaced by a random distance in the range ///[-delta, +delta] along each axis. /// \param[in] delta The maximum displacement(along each axis). void randomDisplacements(SimFlat * s, real_t delta) { int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { uint64_t seed = mkSeed(s->atoms->gid[iOff], 457); s->atoms->r[iOff][0] += (2.0 * lcg61(&seed) - 1.0) * delta; s->atoms->r[iOff][1] += (2.0 * lcg61(&seed) - 1.0) * delta; s->atoms->r[iOff][2] += (2.0 * lcg61(&seed) - 1.0) * delta; } } } ///Computes the center of mass velocity of the system. 
void computeVcm(SimFlat * s, real_t vcm[3]) { real_t vcmLocal[4] = {0., 0., 0., 0.}; real_t vcmSum[4] = {0., 0., 0., 0.}; real_t v0 = 0.0; real_t v1 = 0.0; real_t v2 = 0.0; real_t v3 = 0.0; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; //sum the momenta and particle masses for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { v0 += s->atoms->p[iOff][0]; v1 += s->atoms->p[iOff][1]; v2 += s->atoms->p[iOff][2]; int iSpecies = s->atoms->iSpecies[iOff]; v3 += s->species[iSpecies].mass; } } vcmLocal[0] = v0; vcmLocal[1] = v1; vcmLocal[2] = v2; vcmLocal[3] = v3; startTimer(commReduceTimer); addRealParallel(vcmLocal, vcmSum, 4); stopTimer(commReduceTimer); real_t totalMass = vcmSum[3]; vcm[0] = vcmSum[0] / totalMass; vcm[1] = vcmSum[1] / totalMass; vcm[2] = vcmSum[2] / totalMass; }
/// \file /// Initialize the atom configuration. #include "initAtoms.h" #include <math.h> #include <assert.h> #include "constants.h" #include "decomposition.h" #include "parallel.h" #include "random.h" #include "linkCells.h" #include "timestep.h" #include "memUtils.h" #include "performanceTimers.h" static void computeVcm(SimFlat * s, real_t vcm[3]); ///\details /// Call functions such as createFccLattice and setTemperature to set up /// initial atom positions and momenta. Atoms * initAtoms(LinkCell * boxes) { Atoms *atoms = comdMalloc(sizeof(Atoms)); int maxTotalAtoms = MAXATOMS * boxes->nTotalBoxes; // #pragma sst delete { atoms->gid = (int *)comdMalloc(maxTotalAtoms * sizeof(int)); atoms->iSpecies = (int *)comdMalloc(maxTotalAtoms * sizeof(int)); atoms->r = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->p = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->f = (real3 *) comdMalloc(maxTotalAtoms * sizeof(real3)); atoms->U = (real_t *) comdMalloc(maxTotalAtoms * sizeof(real_t)); } atoms->nLocal = 0; atoms->nGlobal = 0; #pragma sst compute for (int iOff = 0; iOff < maxTotalAtoms; iOff++) { atoms->gid[iOff] = 0; atoms->iSpecies[iOff] = 0; zeroReal3(atoms->r[iOff]); zeroReal3(atoms->p[iOff]); zeroReal3(atoms->f[iOff]); atoms->U[iOff] = 0.; } return atoms; } void destroyAtoms(Atoms * atoms) { freeMe(atoms, gid); freeMe(atoms, iSpecies); freeMe(atoms, r); freeMe(atoms, p); freeMe(atoms, f); freeMe(atoms, U); comdFree(atoms); } ///Creates atom positions on a face centered cubic(FCC) lattice with /// nx * ny * nz unit cells and lattice constant lat. /// Set momenta to zero. void createFccLattice(int nx, int ny, int nz, real_t lat, SimFlat * s) { const real_t *localMin = s->domain->localMin; //alias const real_t *localMax = s->domain->localMax; //alias int nb = 4; //number of atoms in the basis real3 basis[4] = { { 0.25, 0.25, 0.25 }, { 0.25, 0.75, 0.75 }, { 0.75, 0.25, 0.75 }, { 0.75, 0.75, 0.25 } }; //create and place atoms int begin[3]; int end[3]; for (int ii = 0; ii < 3; ++ii) { begin[ii] = floor(localMin[ii] / lat); end[ii] = ceil(localMax[ii] / lat); } real_t px, py, pz; px = py = pz = 0.0; #pragma sst compute for (int ix = begin[0]; ix < end[0]; ++ix) for (int iy = begin[1]; iy < end[1]; ++iy) for (int iz = begin[2]; iz < end[2]; ++iz) for (int ib = 0; ib < nb; ++ib) { real_t rx = (ix + basis[ib][0]) * lat; real_t ry = (iy + basis[ib][1]) * lat; real_t rz = (iz + basis[ib][2]) * lat; if (rx < localMin[0] || rx >= localMax[0]) continue; if (ry < localMin[1] || ry >= localMax[1]) continue; if (rz < localMin[2] || rz >= localMax[2]) continue; int id = ib + nb * (iz + nz * (iy + ny * (ix))); putAtomInBox(s->boxes, s->atoms, id, 0, rx, ry, rz, px, py, pz); } #pragma sst init ((int64_t)nb*nx)*((int64_t)(ny*nz)) s->atoms->nGlobal = 0; if (getMyRank() == 0) printf("nb=%d nx=%d ny=%d nz=%d nr=%d nglbl=%lld\n", nb, nx, ny, nz, getNRanks(), s->atoms->nGlobal); #pragma sst init s->atoms->nGlobal / getNRanks() s->atoms->nLocal = s->atoms->nLocal; s->boxes->nTotalAtoms = s->atoms->nLocal; //set total atoms in simulation startTimer(commReduceTimer); addIntParallel(&s->atoms->nLocal, &s->atoms->nGlobal, 1); stopTimer(commReduceTimer); #pragma sst delete assert(s->atoms->nGlobal == nb * nx * ny * nz); } ///Sets the center of mass velocity of the system. /// \param[in] newVcm The desired center of mass velocity. 
void setVcm(SimFlat * s, real_t newVcm[3]) { real_t oldVcm[3]; computeVcm(s, oldVcm); real_t vShift[3]; vShift[0] = (newVcm[0] - oldVcm[0]); vShift[1] = (newVcm[1] - oldVcm[1]); vShift[2] = (newVcm[2] - oldVcm[2]); int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; #pragma omp parallel for for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iSpecies = s->atoms->iSpecies[iOff]; real_t mass = s->species[iSpecies].mass; s->atoms->p[iOff][0] += mass * vShift[0]; s->atoms->p[iOff][1] += mass * vShift[1]; s->atoms->p[iOff][2] += mass * vShift[2]; } } } ///Sets the temperature of system. /// ///Selects atom velocities randomly from a boltzmann(equilibrium) /// distribution that corresponds to the specified temperature.This /// random process will typically result in a small, but non zero center /// of mass velocity and a small difference from the specified /// temperature.For typical MD runs these small differences are /// unimportant, However, to avoid possible confusion, we set the center /// of mass velocity to zero and scale the velocities to exactly match /// the input temperature. void setTemperature(SimFlat * s, real_t temperature) { s->initialTemp = temperature; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; //set initial velocities for the distribution #pragma omp parallel for for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { int iType = s->atoms->iSpecies[iOff]; real_t mass = s->species[iType].mass; real_t sigma = sqrt(kB_eV * temperature / mass); uint64_t seed = mkSeed(s->atoms->gid[iOff], 123); s->atoms->p[iOff][0] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][1] = mass * sigma * gasdev(&seed); s->atoms->p[iOff][2] = mass * sigma * gasdev(&seed); } } //compute the resulting temperature // kinetic energy = 3 / 2 kB * Temperature if (temperature == 0.0) return; real_t vZero[3] = {0., 0., 0.}; setVcm(s, vZero); kineticEnergy(s); real_t temp = (s->eKinetic / s->atoms->nGlobal) / kB_eV / 1.5; //scale the velocities to achieve the target temperature real_t scaleFactor = sqrt(temperature / temp); #pragma omp parallel for for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { s->atoms->p[iOff][0] *= scaleFactor; s->atoms->p[iOff][1] *= scaleFactor; s->atoms->p[iOff][2] *= scaleFactor; } } kineticEnergy(s); temp = s->eKinetic / s->atoms->nGlobal / kB_eV / 1.5; } ///Add a random displacement to the atom positions. /// Atoms are displaced by a random distance in the range ///[-delta, +delta] along each axis. /// \param[in] delta The maximum displacement(along each axis). 
void randomDisplacements(SimFlat * s, real_t delta) { int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; #pragma omp parallel for for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { uint64_t seed = mkSeed(s->atoms->gid[iOff], 457); s->atoms->r[iOff][0] += (2.0 * lcg61(&seed) - 1.0) * delta; s->atoms->r[iOff][1] += (2.0 * lcg61(&seed) - 1.0) * delta; s->atoms->r[iOff][2] += (2.0 * lcg61(&seed) - 1.0) * delta; } } } ///Computes the center of mass velocity of the system. void computeVcm(SimFlat * s, real_t vcm[3]) { real_t vcmLocal[4] = {0., 0., 0., 0.}; real_t vcmSum[4] = {0., 0., 0., 0.}; real_t v0 = 0.0; real_t v1 = 0.0; real_t v2 = 0.0; real_t v3 = 0.0; int avgAtomsPerBox = s->boxes->nTotalAtoms / s->boxes->nLocalBoxes; //sum the momenta and particle masses #pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2) reduction(+:v3) for (int iBox = 0; iBox < s->boxes->nLocalBoxes; ++iBox) { #pragma sst loop_count avgAtomsPerBox for (int iOff = MAXATOMS * iBox, ii = 0; ii < s->boxes->nAtoms[iBox]; ++ii, ++iOff) { v0 += s->atoms->p[iOff][0]; v1 += s->atoms->p[iOff][1]; v2 += s->atoms->p[iOff][2]; int iSpecies = s->atoms->iSpecies[iOff]; v3 += s->species[iSpecies].mass; } } vcmLocal[0] = v0; vcmLocal[1] = v1; vcmLocal[2] = v2; vcmLocal[3] = v3; startTimer(commReduceTimer); addRealParallel(vcmLocal, vcmSum, 4); stopTimer(commReduceTimer); real_t totalMass = vcmSum[3]; vcm[0] = vcmSum[0] / totalMass; vcm[1] = vcmSum[1] / totalMass; vcm[2] = vcmSum[2] / totalMass; }
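A pattern worth noting in computeVcm() above: the four components of the local sum are accumulated in separate scalars (v0..v3), each with its own reduction clause, because the code targets OpenMP compilers without reliable array reductions. A minimal self-contained sketch of the same idiom (names here are illustrative, not part of CoMD):

#include <stdio.h>

int main(void)
{
    double p[100][3];
    for (int i = 0; i < 100; i++)
        for (int d = 0; d < 3; d++)
            p[i][d] = 0.01 * i + d;

    /* one scalar accumulator and one reduction clause per vector component */
    double v0 = 0.0, v1 = 0.0, v2 = 0.0;
    #pragma omp parallel for reduction(+:v0) reduction(+:v1) reduction(+:v2)
    for (int i = 0; i < 100; i++) {
        v0 += p[i][0];
        v1 += p[i][1];
        v2 += p[i][2];
    }
    printf("sum = (%g, %g, %g)\n", v0, v1, v2);
    return 0;
}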
GB_unaryop__minv_uint64_int64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_int64 // op(A') function: GB_tran__minv_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_int64 ( uint64_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_int64 // op(A') function: GB_tran__minv_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_int64 ( uint64_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_uint64_int64 // op(A') function: GB_tran__minv_uint64_int64 // C type: uint64_t // A type: int64_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 64) #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 64) ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_uint64_int64 ( uint64_t *restrict Cx, const int64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_uint64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
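For readers tracing the macros, the hot loop of GB_unop__minv_uint64_int64 expands to a cast followed by the integer "minv" (multiplicative inverse) operator. The sketch below hand-expands it; the stand-in for GB_IMINV_UNSIGNED is an assumption (GraphBLAS defines integer division so the loop never faults; here 1/0 is mapped to the all-ones value), so treat it as illustrative rather than the GB.h definition:

#include <stdint.h>

/* assumed behavior of GB_IMINV_UNSIGNED(x,64): 1/0 -> all ones, 1/1 -> 1, else 0 */
static inline uint64_t minv_u64_sketch(uint64_t x)
{ return (x == 0) ? UINT64_MAX : (UINT64_C(1) / x); }

void unop_minv_sketch(uint64_t *Cx, const int64_t *Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        int64_t  aij = Ax[p];          /* GB_GETA    */
        uint64_t x   = (uint64_t)aij;  /* GB_CASTING */
        Cx[p] = minv_u64_sketch(x);    /* GB_OP      */
    }
}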
selu_kernel_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: haitao@openailab.com */ #include "selu_kernel_arm.h" #include "neon_mathfun.h" #include <math.h> #include <arm_neon.h> void selu_kernel(int i, int id, void* data, const float* input, float* output, float alpha, float lambda) { float alpha_lambda = alpha * lambda; int step = ((int*)data)[0]; float32x4_t _one = vdupq_n_f32(1.f); float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda); float32x4_t _lambda = vdupq_n_f32(lambda); const float* cur_input = input + id * step; float* cur_output = output + id * step; for (int i = 0; i < (step & -4); i += 4) { float32x4_t _p = vld1q_f32(cur_input); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _nps = exp_ps(_p); _nps = vsubq_f32(_nps, _one); _nps = vmulq_f32(_nps, _alpha_lambda); _p = vmulq_f32(_p, _lambda); _p = vbslq_f32(_lemask, _nps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { if (cur_input[0] < 0.f) cur_output[0] = (exp(cur_input[0]) - 1.f) * alpha_lambda; else cur_output[0] = cur_input[0] * lambda; cur_input++; cur_output++; } } int selu_run(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param, int num_thread) { float* data = (float*)input_tensor->data; float* out_data = (float*)output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda); } return 0; }
/* * Copyright (c) 2021, OPEN AI LAB Author: haitao@openailab.com */ #include "selu_kernel_arm.h" #include "neon_mathfun.h" #include <math.h> #include <arm_neon.h> void selu_kernel(int i, int id, void *data, const float *input, float *output, float alpha, float lambda) { float alpha_lambda = alpha * lambda; int step = ((int *)data)[0]; float32x4_t _one = vdupq_n_f32(1.f); float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda); float32x4_t _lambda = vdupq_n_f32(lambda); const float *cur_input = input + id * step; float *cur_output = output + id * step; for (int i = 0; i < (step & -4); i += 4) { float32x4_t _p = vld1q_f32(cur_input); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _nps = exp_ps(_p); _nps = vsubq_f32(_nps, _one); _nps = vmulq_f32(_nps, _alpha_lambda); _p = vmulq_f32(_p, _lambda); _p = vbslq_f32(_lemask, _nps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { if (cur_input[0] < 0.f) cur_output[0] = (exp(cur_input[0]) - 1.f) * alpha_lambda; else cur_output[0] = cur_input[0] * lambda; cur_input++; cur_output++; } } int selu_run(struct tensor *output_tensor, struct tensor *input_tensor, struct selu_param *selu_param, int num_thread) { float *data = (float *)input_tensor->data; float *out_data = (float *)output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda); } return 0; }
/* * Copyright (c) 2021, OPEN AI LAB Author: haitao@openailab.com */ #include "selu_kernel_arm.h" #include "neon_mathfun.h" #include <math.h> #include <arm_neon.h> void selu_kernel(int i, int id, void *data, const float *input, float *output, float alpha, float lambda) { float alpha_lambda = alpha * lambda; int step = ((int *)data)[0]; float32x4_t _one = vdupq_n_f32(1.f); float32x4_t _zero = vdupq_n_f32(0.f); float32x4_t _alpha_lambda = vdupq_n_f32(alpha_lambda); float32x4_t _lambda = vdupq_n_f32(lambda); const float *cur_input = input + id * step; float *cur_output = output + id * step; for (int i = 0; i < (step & -4); i += 4) { float32x4_t _p = vld1q_f32(cur_input); uint32x4_t _lemask = vcleq_f32(_p, _zero); float32x4_t _nps = exp_ps(_p); _nps = vsubq_f32(_nps, _one); _nps = vmulq_f32(_nps, _alpha_lambda); _p = vmulq_f32(_p, _lambda); _p = vbslq_f32(_lemask, _nps, _p); vst1q_f32(cur_output, _p); cur_input += 4; cur_output += 4; } for (int i = step & ~3; i < step; i++) { if (cur_input[0] < 0.f) cur_output[0] = (exp(cur_input[0]) - 1.f) * alpha_lambda; else cur_output[0] = cur_input[0] * lambda; cur_input++; cur_output++; } } int selu_run(struct tensor *output_tensor, struct tensor *input_tensor, struct selu_param *selu_param, int num_thread) { float *data = (float *)input_tensor->data; float *out_data = (float *)output_tensor->data; float alpha = selu_param->alpha; float lambda = selu_param->lambda; int chan_num = input_tensor->dims[0] * input_tensor->dims[1]; int chan_size = input_tensor->dims[2] * input_tensor->dims[3]; #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < chan_num; i++) { int offset = i * chan_size; selu_kernel(0, 0, &chan_size, data + offset, out_data + offset, alpha, lambda); } return 0; }
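As a cross-check for the NEON path above: the kernel computes SELU(x) = lambda * x for x > 0 and lambda * alpha * (exp(x) - 1) otherwise, with vbslq_f32 selecting between the two results by the x <= 0 mask. A scalar reference (a sketch for testing, not part of the Tengine API):

#include <math.h>

static void selu_ref(const float *in, float *out, int n, float alpha, float lambda)
{
    const float alpha_lambda = alpha * lambda;
    for (int i = 0; i < n; i++)
        out[i] = (in[i] > 0.f) ? in[i] * lambda
                               : (expf(in[i]) - 1.f) * alpha_lambda;
}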
GB_binop__gt_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__gt_uint8 // A.*B function (eWiseMult): GB_AemultB__gt_uint8 // A*D function (colscale): GB_AxD__gt_uint8 // D*A function (rowscale): GB_DxB__gt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__gt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__gt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_uint8 // C=scalar+B GB_bind1st__gt_uint8 // C=scalar+B' GB_bind1st_tran__gt_uint8 // C=A+scalar GB_bind2nd__gt_uint8 // C=A'+scalar GB_bind2nd_tran__gt_uint8 // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__gt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__gt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__gt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__gt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__gt_uint8 // A.*B function (eWiseMult): GB_AemultB__gt_uint8 // A*D function (colscale): GB_AxD__gt_uint8 // D*A function (rowscale): GB_DxB__gt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__gt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__gt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_uint8 // C=scalar+B GB_bind1st__gt_uint8 // C=scalar+B' GB_bind1st_tran__gt_uint8 // C=A+scalar GB_bind2nd__gt_uint8 // C=A'+scalar GB_bind2nd_tran__gt_uint8 // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__gt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__gt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__gt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__gt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__gt_uint8 // A.*B function (eWiseMult): GB_AemultB__gt_uint8 // A*D function (colscale): GB_AxD__gt_uint8 // D*A function (rowscale): GB_DxB__gt_uint8 // C+=B function (dense accum): GB_Cdense_accumB__gt_uint8 // C+=b function (dense accum): GB_Cdense_accumb__gt_uint8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_uint8 // C=scalar+B GB_bind1st__gt_uint8 // C=scalar+B' GB_bind1st_tran__gt_uint8 // C=A+scalar GB_bind2nd__gt_uint8 // C=A'+scalar GB_bind2nd_tran__gt_uint8 // C type: bool // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT8 || GxB_NO_GT_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__gt_uint8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__gt_uint8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__gt_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__gt_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__gt_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__gt_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
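The two cells above are the same generated source with and without the OpenMP pragmas on the bind1st/bind2nd apply loops. Stripped of the GB_* macro layers, each of those kernels is just a flat elementwise loop; a minimal self-contained sketch of the bind1st pattern for GT on uint8_t (illustrative names, not the GraphBLAS API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch of the bind1st pattern: Cx [p] = (x > Bx [p]) for all p,
   mirroring GB_bind1st__gt_uint8 with the macro machinery stripped away. */
static void bind1st_gt_uint8(bool *Cx, uint8_t x, const uint8_t *Bx,
                             int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        Cx[p] = (x > Bx[p]);
    }
}

int main(void)
{
    uint8_t Bx[4] = {1, 5, 9, 200};
    bool Cx[4];
    bind1st_gt_uint8(Cx, 7, Bx, 4, 2);
    for (int p = 0; p < 4; p++) printf("%d ", (int) Cx[p]);  /* prints: 1 1 0 0 */
    printf("\n");
    return 0;
}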
test_encap_decap.c
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/

/*
   Encapsulate a secret and use the secret to encrypt a message.
   Decapsulate the secret and use the secret to decrypt the
   encrypted message.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>

#define NTHREADS 8
#define MAXSIZE 256

#define G2LEN 4*BFS_BLS381

int main()
{
    int i,rc;

    // Seed value for CSPRNG
    char seed[PQNIST_SEED_LENGTH];
    octet SEED = {sizeof(seed),sizeof(seed),seed};

    // Seed value for key generation
    char seedkeys[NTHREADS][PQNIST_SEED_LENGTH];

    csprng RNG;

    // Initialization vector
    char iv[PQNIST_AES_IV_LENGTH];
    octet IV= {sizeof(iv),sizeof(iv),iv};

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // AES CBC ciphertext
    char c[NTHREADS][MAXSIZE];
    octet C[NTHREADS];

    // non random seed value
    for (i=0; i<32; i++) SEED.val[i]=i+1;
    printf("SEED: ");
    OCT_output(&SEED);
    printf("\n");

    // initialise random number generator
    CREATE_CSPRNG(&RNG,&SEED);

    // Initialise key generation seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seedkeys[i][j] = i;
        }
    }

    // Bob's SIKE keys
    uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
    uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];

    // Alice's BLS keys (not used)
    char BLSpk[NTHREADS][G2LEN];
    char BLSsk[NTHREADS][BGS_BLS381];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc);
            OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
            exit(EXIT_FAILURE);
        }

        int j = OQS_KEM_sike_p751_length_public_key;
        printf("Bob SIKE pklen %d pk: ", j);
        amcl_print_hex(SIKEpk[i], j);
        j = OQS_KEM_sike_p751_length_secret_key;
        printf("Bob SIKE sklen %d sk: ", j);
        amcl_print_hex(SIKEsk[i], j);
    }

    // Alice
    for(i=0; i<NTHREADS; i++)
    {
        bzero(p[i],sizeof(p[i]));
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];

        // Pad message
        int l = 16 - (P[i].len % 16);
        if (l < 16)
        {
            OCT_jbyte(&P[i],0,l);
        }
    }

    // Random initialization value
    generateRandom(&RNG,&IV);
    printf("Alice IV: ");
    OCT_output(&IV);

    // Copy plaintext
    for(i=0; i<NTHREADS; i++)
    {
        C[i].val = c[i];
        C[i].max = MAXSIZE;
        OCT_copy(&C[i],&P[i]);
        printf("Alice Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");
    }

    // SIKE encapsulated key
    uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Generate an AES key which is encapsulated using SIKE. Use this key to
        // AES encrypt the K parameter.
        rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_encapsulate_encrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Alice ciphertext: ");
        OCT_output(&C[i]);
        printf("Alice ek %lu ek: ", sizeof(ek[i]));
        amcl_print_hex(ek[i], sizeof(ek[i]));
        printf("\n");
    }

    // Bob

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Obtain encapsulated AES key and decrypt C
        rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_decapsulate_decrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Bob Plaintext: ");
        OCT_output(&C[i]);
        printf("Bob Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");

        // Compare sent and received message (returns 0 for failure)
        rc = OCT_comp(&P[i],&C[i]);
        if(!rc)
        {
            fprintf(stderr, "FAILURE OCT_comp rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
    }

    // clear memory
    OCT_clear(&IV);
    for(i=0; i<NTHREADS; i++)
    {
        OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        OCT_clear(&P[i]);
        OCT_clear(&C[i]);
    }

    KILL_CSPRNG(&RNG);

    printf("SUCCESS\n");
    exit(EXIT_SUCCESS);
}
/*
   Encapsulate a secret and use the secret to encrypt a message.
   Decapsulate the secret and use the secret to decrypt the
   encrypted message.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>

#define NTHREADS 8
#define MAXSIZE 256

#define G2LEN 4*BFS_BLS381

int main()
{
    int i,rc;

    // Seed value for CSPRNG
    char seed[PQNIST_SEED_LENGTH];
    octet SEED = {sizeof(seed),sizeof(seed),seed};

    // Seed value for key generation
    char seedkeys[NTHREADS][PQNIST_SEED_LENGTH];

    csprng RNG;

    // Initialization vector
    char iv[PQNIST_AES_IV_LENGTH];
    octet IV= {sizeof(iv),sizeof(iv),iv};

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // AES CBC ciphertext
    char c[NTHREADS][MAXSIZE];
    octet C[NTHREADS];

    // non random seed value
    for (i=0; i<32; i++) SEED.val[i]=i+1;
    printf("SEED: ");
    OCT_output(&SEED);
    printf("\n");

    // initialise random number generator
    CREATE_CSPRNG(&RNG,&SEED);

    // Initialise key generation seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seedkeys[i][j] = i;
        }
    }

    // Bob's SIKE keys
    uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
    uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];

    // Alice's BLS keys (not used)
    char BLSpk[NTHREADS][G2LEN];
    char BLSsk[NTHREADS][BGS_BLS381];

    for(i=0; i<NTHREADS; i++)
    {
        rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc);
            OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
            exit(EXIT_FAILURE);
        }

        int j = OQS_KEM_sike_p751_length_public_key;
        printf("Bob SIKE pklen %d pk: ", j);
        amcl_print_hex(SIKEpk[i], j);
        j = OQS_KEM_sike_p751_length_secret_key;
        printf("Bob SIKE sklen %d sk: ", j);
        amcl_print_hex(SIKEsk[i], j);
    }

    // Alice
    for(i=0; i<NTHREADS; i++)
    {
        bzero(p[i],sizeof(p[i]));
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];

        // Pad message
        int l = 16 - (P[i].len % 16);
        if (l < 16)
        {
            OCT_jbyte(&P[i],0,l);
        }
    }

    // Random initialization value
    generateRandom(&RNG,&IV);
    printf("Alice IV: ");
    OCT_output(&IV);

    // Copy plaintext
    for(i=0; i<NTHREADS; i++)
    {
        C[i].val = c[i];
        C[i].max = MAXSIZE;
        OCT_copy(&C[i],&P[i]);
        printf("Alice Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");
    }

    // SIKE encapsulated key
    uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext];

    for(i=0; i<NTHREADS; i++)
    {
        // Generate an AES key which is encapsulated using SIKE. Use this key to
        // AES encrypt the K parameter.
        rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_encapsulate_encrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Alice ciphertext: ");
        OCT_output(&C[i]);
        printf("Alice ek %lu ek: ", sizeof(ek[i]));
        amcl_print_hex(ek[i], sizeof(ek[i]));
        printf("\n");
    }

    // Bob

    for(i=0; i<NTHREADS; i++)
    {
        // Obtain encapsulated AES key and decrypt C
        rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_decapsulate_decrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Bob Plaintext: ");
        OCT_output(&C[i]);
        printf("Bob Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");

        // Compare sent and received message (returns 0 for failure)
        rc = OCT_comp(&P[i],&C[i]);
        if(!rc)
        {
            fprintf(stderr, "FAILURE OCT_comp rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
    }

    // clear memory
    OCT_clear(&IV);
    for(i=0; i<NTHREADS; i++)
    {
        OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        OCT_clear(&P[i]);
        OCT_clear(&C[i]);
    }

    KILL_CSPRNG(&RNG);

    printf("SUCCESS\n");
    exit(EXIT_SUCCESS);
}
/*
   Encapsulate a secret and use the secret to encrypt a message.
   Decapsulate the secret and use the secret to decrypt the
   encrypted message.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <amcl/utils.h>
#include <amcl/randapi.h>
#include <amcl/bls_BLS381.h>
#include <oqs/oqs.h>
#include <pqnist/pqnist.h>

#define NTHREADS 8
#define MAXSIZE 256

#define G2LEN 4*BFS_BLS381

int main()
{
    int i,rc;

    // Seed value for CSPRNG
    char seed[PQNIST_SEED_LENGTH];
    octet SEED = {sizeof(seed),sizeof(seed),seed};

    // Seed value for key generation
    char seedkeys[NTHREADS][PQNIST_SEED_LENGTH];

    csprng RNG;

    // Initialization vector
    char iv[PQNIST_AES_IV_LENGTH];
    octet IV= {sizeof(iv),sizeof(iv),iv};

    // Message to be sent to Bob
    char p[NTHREADS][MAXSIZE];
    octet P[NTHREADS];

    // AES CBC ciphertext
    char c[NTHREADS][MAXSIZE];
    octet C[NTHREADS];

    // non random seed value
    for (i=0; i<32; i++) SEED.val[i]=i+1;
    printf("SEED: ");
    OCT_output(&SEED);
    printf("\n");

    // initialise random number generator
    CREATE_CSPRNG(&RNG,&SEED);

    // Initialise key generation seed
    for(i=0; i<NTHREADS; i++)
    {
        for(int j=0; j<PQNIST_SEED_LENGTH; j++)
        {
            seedkeys[i][j] = i;
        }
    }

    // Bob's SIKE keys
    uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key];
    uint8_t SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key];

    // Alice's BLS keys (not used)
    char BLSpk[NTHREADS][G2LEN];
    char BLSsk[NTHREADS][BGS_BLS381];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        rc = pqnist_keys(seedkeys[i], SIKEpk[i], SIKEsk[i], BLSpk[i], BLSsk[i]);
        if (rc)
        {
            fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc);
            OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
            exit(EXIT_FAILURE);
        }

        int j = OQS_KEM_sike_p751_length_public_key;
        printf("Bob SIKE pklen %d pk: ", j);
        amcl_print_hex(SIKEpk[i], j);
        j = OQS_KEM_sike_p751_length_secret_key;
        printf("Bob SIKE sklen %d sk: ", j);
        amcl_print_hex(SIKEsk[i], j);
    }

    // Alice
    for(i=0; i<NTHREADS; i++)
    {
        bzero(p[i],sizeof(p[i]));
        P[i].max = MAXSIZE;
        P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i);
        P[i].val = p[i];

        // Pad message
        int l = 16 - (P[i].len % 16);
        if (l < 16)
        {
            OCT_jbyte(&P[i],0,l);
        }
    }

    // Random initialization value
    generateRandom(&RNG,&IV);
    printf("Alice IV: ");
    OCT_output(&IV);

    // Copy plaintext
    for(i=0; i<NTHREADS; i++)
    {
        C[i].val = c[i];
        C[i].max = MAXSIZE;
        OCT_copy(&C[i],&P[i]);
        printf("Alice Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");
    }

    // SIKE encapsulated key
    uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext];

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Generate an AES key which is encapsulated using SIKE. Use this key to
        // AES encrypt the K parameter.
        rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_encapsulate_encrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Alice ciphertext: ");
        OCT_output(&C[i]);
        printf("Alice ek %lu ek: ", sizeof(ek[i]));
        amcl_print_hex(ek[i], sizeof(ek[i]));
        printf("\n");
    }

    // Bob

    #pragma omp parallel for
    for(i=0; i<NTHREADS; i++)
    {
        // Obtain encapsulated AES key and decrypt C
        rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]);
        if(rc)
        {
            fprintf(stderr, "FAILURE pqnist_decapsulate_decrypt rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }

        printf("Bob Plaintext: ");
        OCT_output(&C[i]);
        printf("Bob Plaintext: ");
        OCT_output_string(&C[i]);
        printf("\n");

        // Compare sent and received message (returns 0 for failure)
        rc = OCT_comp(&P[i],&C[i]);
        if(!rc)
        {
            fprintf(stderr, "FAILURE OCT_comp rc: %d\n", rc);
            exit(EXIT_FAILURE);
        }
    }

    // clear memory
    OCT_clear(&IV);
    for(i=0; i<NTHREADS; i++)
    {
        OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key);
        OCT_clear(&P[i]);
        OCT_clear(&C[i]);
    }

    KILL_CSPRNG(&RNG);

    printf("SUCCESS\n");
    exit(EXIT_SUCCESS);
}
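A caveat about the parallel loops in the original and omp_formatted cells above: rc is a single variable shared by every thread of each #pragma omp parallel for, so concurrent assignments to it race, and calling exit() from inside a parallel region is not well defined in OpenMP. A minimal sketch of the usual repair, with a hypothetical do_work() standing in for the pqnist calls: keep the status variable loop-local and fold failures into a reduction, acting on them only after the region ends.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the pqnist_* calls; always succeeds here. */
static int do_work(int i) { (void) i; return 0; }

int main(void)
{
    int failed = 0;
    #pragma omp parallel for reduction(|| : failed)
    for (int i = 0; i < 8; i++)
    {
        int rc = do_work(i);          /* loop-local: each thread owns its own rc */
        failed = failed || (rc != 0); /* aggregate instead of exit()-ing in parallel */
    }
    if (failed)                       /* act on errors only after the region ends */
    {
        fprintf(stderr, "FAILURE\n");
        exit(EXIT_FAILURE);
    }
    printf("SUCCESS\n");
    return 0;
}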
matmul.par2d.c
#include <math.h> #include <omp.h> #define ceild(n, d) ceil(((double)(n)) / ((double)(d))) #define floord(n, d) floor(((double)(n)) / ((double)(d))) #define max(x, y) ((x) > (y) ? (x) : (y)) #define min(x, y) ((x) < (y) ? (x) : (y)) #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #define M 2048 #define N 2048 #define K 2048 #define alpha 1 #define beta 1 double A[M][K + 13]; double B[K][N + 13]; double C[M][N + 13]; #ifdef PERFCTR #include "papi_defs.h" #include <papi.h> #endif #include <sys/time.h> #include <unistd.h> #ifdef TIME #define IF_TIME(foo) foo; #else #define IF_TIME(foo) #endif void init_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = (i + j); B[i][j] = (double)(i * j); C[i][j] = 0.0; } } } void print_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { fprintf(stderr, "%lf ", C[i][j]); if (j % 80 == 79) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } } double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } double t_start, t_end; int main() { int i, j, k; register double s; init_array(); #ifdef PERFCTR PERF_INIT; #endif IF_TIME(t_start = rtclock()); int t1, t2, t3, t4, t5, t6; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; omp_set_nested(1); omp_set_num_threads(2); /* Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in 0.01s. */ if ((M >= 1) && (N >= 1) && (K >= 1)) { lb1 = 0; ub1 = floord(M - 1, 32); #pragma omp parallel for shared(lb1, ub1) private(lb2, ub2, t1, t2, t3, t4, \ t5, t6) for (t1 = lb1; t1 <= ub1; t1++) { lb2 = 0; ub2 = floord(N - 1, 32); #pragma omp parallel for shared(t1, lb1, ub1, lb2, ub2) private(t2, t3, t4, \ t5, t6) for (t2 = lb2; t2 <= ub2; t2++) { for (t3 = 0; t3 <= floord(K - 1, 32); t3++) { for (t4 = max(0, 32 * t1); t4 <= min(M - 1, 32 * t1 + 31); t4++) { for (t5 = max(0, 32 * t3); t5 <= min(K - 1, 32 * t3 + 31); t5++) { { lbv = max(0, 32 * t2); ubv = min(N - 1, 32 * t2 + 31); #pragma ivdep #pragma vector always for (t6 = lbv; t6 <= ubv; t6++) { C[t4][t6] = C[t4][t6] + A[t4][t5] * B[t5][t6]; ; } } } } } } } } /* End of CLooG code */ IF_TIME(t_end = rtclock()); IF_TIME(printf("%0.6lfs\n", t_end - t_start)); #ifdef PERFCTR PERF_EXIT; #endif #ifdef TEST print_array(); #endif return 0; }
#include <math.h>
#include <omp.h>
#define ceild(n, d) ceil(((double)(n)) / ((double)(d)))
#define floord(n, d) floor(((double)(n)) / ((double)(d)))
#define max(x, y) ((x) > (y) ? (x) : (y))
#define min(x, y) ((x) < (y) ? (x) : (y))

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define M 2048
#define N 2048
#define K 2048
#define alpha 1
#define beta 1
double A[M][K + 13];
double B[K][N + 13];
double C[M][N + 13];

#ifdef PERFCTR
#include "papi_defs.h"
#include <papi.h>
#endif

#include <sys/time.h>
#include <unistd.h>

#ifdef TIME
#define IF_TIME(foo) foo;
#else
#define IF_TIME(foo)
#endif

void init_array() {
  int i, j;

  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      A[i][j] = (i + j);
      B[i][j] = (double)(i * j);
      C[i][j] = 0.0;
    }
  }
}

void print_array() {
  int i, j;

  for (i = 0; i < N; i++) {
    for (j = 0; j < N; j++) {
      fprintf(stderr, "%lf ", C[i][j]);
      if (j % 80 == 79)
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
  }
}

double rtclock() {
  struct timezone Tzp;
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, &Tzp);
  if (stat != 0)
    printf("Error return from gettimeofday: %d", stat);
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

double t_start, t_end;

int main() {
  int i, j, k;
  register double s;

  init_array();

#ifdef PERFCTR
  PERF_INIT;
#endif

  IF_TIME(t_start = rtclock());

  int t1, t2, t3, t4, t5, t6;

  register int lb, ub, lb1, ub1, lb2, ub2;
  register int lbv, ubv;

  omp_set_nested(1);
  omp_set_num_threads(2);
  /*
   * Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in
   * 0.01s.
   */
  if ((M >= 1) && (N >= 1) && (K >= 1)) {
    lb1 = 0;
    ub1 = floord(M - 1, 32);
    for (t1 = lb1; t1 <= ub1; t1++) {
      lb2 = 0;
      ub2 = floord(N - 1, 32);
      for (t2 = lb2; t2 <= ub2; t2++) {
        for (t3 = 0; t3 <= floord(K - 1, 32); t3++) {
          for (t4 = max(0, 32 * t1); t4 <= min(M - 1, 32 * t1 + 31); t4++) {
            for (t5 = max(0, 32 * t3); t5 <= min(K - 1, 32 * t3 + 31); t5++) {
              {
                lbv = max(0, 32 * t2);
                ubv = min(N - 1, 32 * t2 + 31);
#pragma ivdep
#pragma vector always
                for (t6 = lbv; t6 <= ubv; t6++) {
                  C[t4][t6] = C[t4][t6] + A[t4][t5] * B[t5][t6];
                  ;
                }
              }
            }
          }
        }
      }
    }
  }
  /* End of CLooG code */

  IF_TIME(t_end = rtclock());
  IF_TIME(printf("%0.6lfs\n", t_end - t_start));

#ifdef PERFCTR
  PERF_EXIT;
#endif

#ifdef TEST
  print_array();
#endif
  return 0;
}
#include <math.h> #include <omp.h> #define ceild(n, d) ceil(((double)(n)) / ((double)(d))) #define floord(n, d) floor(((double)(n)) / ((double)(d))) #define max(x, y) ((x) > (y) ? (x) : (y)) #define min(x, y) ((x) < (y) ? (x) : (y)) #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #define M 2048 #define N 2048 #define K 2048 #define alpha 1 #define beta 1 double A[M][K + 13]; double B[K][N + 13]; double C[M][N + 13]; #ifdef PERFCTR #include "papi_defs.h" #include <papi.h> #endif #include <sys/time.h> #include <unistd.h> #ifdef TIME #define IF_TIME(foo) foo; #else #define IF_TIME(foo) #endif void init_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { A[i][j] = (i + j); B[i][j] = (double)(i * j); C[i][j] = 0.0; } } } void print_array() { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { fprintf(stderr, "%lf ", C[i][j]); if (j % 80 == 79) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } } double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday(&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d", stat); return (Tp.tv_sec + Tp.tv_usec * 1.0e-6); } double t_start, t_end; int main() { int i, j, k; register double s; init_array(); #ifdef PERFCTR PERF_INIT; #endif IF_TIME(t_start = rtclock()); int t1, t2, t3, t4, t5, t6; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; omp_set_nested(1); omp_set_num_threads(2); /* * Generated from PLUTO-produced CLooG file by CLooG v0.14.1 64 bits in * 0.01s. */ if ((M >= 1) && (N >= 1) && (K >= 1)) { lb1 = 0; ub1 = floord(M - 1, 32); #pragma omp parallel for shared(lb1, ub1) private(lb2, ub2, t1, t2, t3, t4, \ t5, t6) for (t1 = lb1; t1 <= ub1; t1++) { lb2 = 0; ub2 = floord(N - 1, 32); #pragma omp parallel for shared(t1, lb1, ub1, lb2, ub2) private(t2, t3, t4, \ t5, t6) for (t2 = lb2; t2 <= ub2; t2++) { for (t3 = 0; t3 <= floord(K - 1, 32); t3++) { for (t4 = max(0, 32 * t1); t4 <= min(M - 1, 32 * t1 + 31); t4++) { for (t5 = max(0, 32 * t3); t5 <= min(K - 1, 32 * t3 + 31); t5++) { { lbv = max(0, 32 * t2); ubv = min(N - 1, 32 * t2 + 31); #pragma ivdep #pragma vector always for (t6 = lbv; t6 <= ubv; t6++) { C[t4][t6] = C[t4][t6] + A[t4][t5] * B[t5][t6]; ; } } } } } } } } /* End of CLooG code */ IF_TIME(t_end = rtclock()); IF_TIME(printf("%0.6lfs\n", t_end - t_start)); #ifdef PERFCTR PERF_EXIT; #endif #ifdef TEST print_array(); #endif return 0; }
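matmul.par2d.c above drives its two outer tile loops with nested #pragma omp parallel for regions plus omp_set_nested(1), an approach that easily oversubscribes (two threads per nesting level here). On OpenMP 3.0 and later, the same tile-level parallelism is more commonly written as one flat team with collapse(2). A minimal runnable sketch of that alternative, shrunk to 64x64 so it finishes instantly (sizes and names are illustrative, not PLUTO output):

#include <stdio.h>

#define SZ 64            /* small stand-in for M = N = K = 2048 */
#define T  32            /* tile size used by the generated code */
#define min(a, b) ((a) < (b) ? (a) : (b))

static double A[SZ][SZ], B[SZ][SZ], C[SZ][SZ];

int main(void)
{
    for (int i = 0; i < SZ; i++)
        for (int j = 0; j < SZ; j++)
        {
            A[i][j] = i + j;
            B[i][j] = (double) (i * j);
            C[i][j] = 0.0;
        }

    /* one flat team over all (t1, t2) tile pairs instead of nested regions;
       each pair owns a distinct C tile, so there are no write conflicts */
    #pragma omp parallel for collapse(2)
    for (int t1 = 0; t1 < SZ / T; t1++)
        for (int t2 = 0; t2 < SZ / T; t2++)
            for (int t3 = 0; t3 < SZ / T; t3++)
                for (int i = t1 * T; i < min(SZ, (t1 + 1) * T); i++)
                    for (int k = t3 * T; k < min(SZ, (t3 + 1) * T); k++)
                        for (int j = t2 * T; j < min(SZ, (t2 + 1) * T); j++)
                            C[i][j] += A[i][k] * B[k][j];

    printf("C[SZ-1][SZ-1] = %f\n", C[SZ - 1][SZ - 1]);
    return 0;
}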
sum.c
#include<stdio.h>
#include<omp.h>

int main(int argc, char *argv[]){
	int nThreads = 4;
	omp_set_num_threads(nThreads);

	int n = 0;
	scanf("%d", &n);

	double sum1 = 0, sum2 = 0, sum3 = 0, sum4 = 0;
	double sum1P[nThreads];
	double sum2P[nThreads];
	for(int i = 0; i < nThreads; i++)
		sum1P[i] = sum2P[i] = 0;

	/*
	 * PARALLEL
	 *	if(expression)
	 *	num_threads(int|expression)
	 *	private(list)
	 *	firstprivate(list)
	 *	shared(list)
	 *	default(shared|none)
	 *	copyin(list)
	 *	reduction(operator: list) //basic operators only
	 */
	#pragma omp parallel
	{
		int at = omp_get_thread_num();
		int end = (at+1)*(n/nThreads);
		for(int i = at*(n/nThreads); i < end; i++)
			sum1P[at] += i;
	}

	if(n%4 == 0) sum1 += n*((n%4)+1)-(n%4);
	else if(n%4 == 1) sum1 += n*((n%4)+1)-(n%4);
	else if(n%4 == 2) sum1 += n*((n%4)+1)-(n%4-1)-2;
	else sum1 += n*((n%4)+1)-2*(n%4);

	for(int i = 0; i < nThreads; i++){
		sum1 += sum1P[i];
	}

	#pragma omp parallel for
	for(int i = 0; i <= n; i++){
		sum2P[omp_get_thread_num()] += i;
	}

	for(int i = 0; i < nThreads; i++){
		sum2 += sum2P[i];
	}

	#pragma omp parallel for reduction(+: sum3)
	for(int i = 0; i <= n; i++){
		//printf("Hello from thread #%d iteration #%d\n", omp_get_thread_num(), i);
		sum3 += i;
	}

	#pragma omp parallel sections
	{
		#pragma omp section
		{
			double sumP = 0;
			#pragma omp parallel for reduction(+: sumP)
			for(int i = 0; i <= n; i += 2){
				sumP += i;
			}
			#pragma omp atomic
			sum4 += sumP;
		}
		#pragma omp section
		{
			double sumP = 0;
			#pragma omp parallel for reduction(+: sumP)
			for(int i = 1; i <= n; i += 2){
				sumP += i;
			}
			#pragma omp atomic
			sum4 += sumP;
		}
	}

	printf("Sum1 from 0 to %d = %.0lf\n", n, sum1);
	printf("Sum2 from 0 to %d = %.0lf\n", n, sum2);
	printf("Sum3 from 0 to %d = %.0lf\n", n, sum3);
	printf("Sum4 from 0 to %d = %.0lf\n", n, sum4);

	return 0;
}
#include<stdio.h>
#include<omp.h>

int main(int argc, char *argv[])
{
  int nThreads = 4;
  omp_set_num_threads(nThreads);

  int n = 0;
  scanf("%d", &n);

  double sum1 = 0, sum2 = 0, sum3 = 0, sum4 = 0;
  double sum1P[nThreads];
  double sum2P[nThreads];
  for (int i = 0; i < nThreads; i++)
    sum1P[i] = sum2P[i] = 0;

  /*
   * PARALLEL if(expression) num_threads(int|expression) private(list)
   * firstprivate(list) shared(list) default(shared|none) copyin(list)
   * reduction(operator: list) //basic operators only
   */
  int at = omp_get_thread_num();
  int end = (at + 1) * (n / nThreads);
  for (int i = at * (n / nThreads); i < end; i++)
    sum1P[at] += i;

  if (n % 4 == 0)
    sum1 += n * ((n % 4) + 1) - (n % 4);
  else if (n % 4 == 1)
    sum1 += n * ((n % 4) + 1) - (n % 4);
  else if (n % 4 == 2)
    sum1 += n * ((n % 4) + 1) - (n % 4 - 1) - 2;
  else
    sum1 += n * ((n % 4) + 1) - 2 * (n % 4);

  for (int i = 0; i < nThreads; i++) {
    sum1 += sum1P[i];
  }

  for (int i = 0; i <= n; i++) {
    sum2P[omp_get_thread_num()] += i;
  }

  for (int i = 0; i < nThreads; i++) {
    sum2 += sum2P[i];
  }

  for (int i = 0; i <= n; i++) {
    //printf("Hello from thread #%d iteration #%d\n", omp_get_thread_num(), i);
    sum3 += i;
  }

  {
    double sumP = 0;
    for (int i = 0; i <= n; i += 2) {
      sumP += i;
    }
    sum4 += sumP;
  }

  {
    double sumP = 0;
    for (int i = 1; i <= n; i += 2) {
      sumP += i;
    }
    sum4 += sumP;
  }

  printf("Sum1 from 0 to %d = %.0lf\n", n, sum1);
  printf("Sum2 from 0 to %d = %.0lf\n", n, sum2);
  printf("Sum3 from 0 to %d = %.0lf\n", n, sum3);
  printf("Sum4 from 0 to %d = %.0lf\n", n, sum4);

  return 0;
}
#include<stdio.h>
#include<omp.h>

int main(int argc, char *argv[])
{
  int nThreads = 4;
  omp_set_num_threads(nThreads);

  int n = 0;
  scanf("%d", &n);

  double sum1 = 0, sum2 = 0, sum3 = 0, sum4 = 0;
  double sum1P[nThreads];
  double sum2P[nThreads];
  for (int i = 0; i < nThreads; i++)
    sum1P[i] = sum2P[i] = 0;

  /*
   * PARALLEL if(expression) num_threads(int|expression) private(list)
   * firstprivate(list) shared(list) default(shared|none) copyin(list)
   * reduction(operator: list) //basic operators only
   */
#pragma omp parallel
  {
    int at = omp_get_thread_num();
    int end = (at + 1) * (n / nThreads);
    for (int i = at * (n / nThreads); i < end; i++)
      sum1P[at] += i;
  }

  if (n % 4 == 0)
    sum1 += n * ((n % 4) + 1) - (n % 4);
  else if (n % 4 == 1)
    sum1 += n * ((n % 4) + 1) - (n % 4);
  else if (n % 4 == 2)
    sum1 += n * ((n % 4) + 1) - (n % 4 - 1) - 2;
  else
    sum1 += n * ((n % 4) + 1) - 2 * (n % 4);

  for (int i = 0; i < nThreads; i++) {
    sum1 += sum1P[i];
  }

#pragma omp parallel for
  for (int i = 0; i <= n; i++) {
    sum2P[omp_get_thread_num()] += i;
  }

  for (int i = 0; i < nThreads; i++) {
    sum2 += sum2P[i];
  }

#pragma omp parallel for reduction(+: sum3)
  for (int i = 0; i <= n; i++) {
    //printf("Hello from thread #%d iteration #%d\n", omp_get_thread_num(), i);
    sum3 += i;
  }

#pragma omp parallel sections
  {
#pragma omp section
    {
      double sumP = 0;
#pragma omp parallel for reduction(+: sumP)
      for (int i = 0; i <= n; i += 2) {
        sumP += i;
      }
#pragma omp atomic
      sum4 += sumP;
    }
#pragma omp section
    {
      double sumP = 0;
#pragma omp parallel for reduction(+: sumP)
      for (int i = 1; i <= n; i += 2) {
        sumP += i;
      }
#pragma omp atomic
      sum4 += sumP;
    }
  }

  printf("Sum1 from 0 to %d = %.0lf\n", n, sum1);
  printf("Sum2 from 0 to %d = %.0lf\n", n, sum2);
  printf("Sum3 from 0 to %d = %.0lf\n", n, sum3);
  printf("Sum4 from 0 to %d = %.0lf\n", n, sum4);

  return 0;
}
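sum.c demonstrates four summation strategies. The manual per-thread partial arrays (sum1P, sum2P) work, but adjacent doubles in those arrays sit on the same cache line, so threads invalidate each other's lines on every update (false sharing); the reduction clause used for sum3 is the idiomatic form. A minimal sketch contrasting a padded manual version with the reduction version (the 8-double padding assumes a typical 64-byte cache line):

#include <stdio.h>
#include <omp.h>

#define NT 4

int main(void)
{
    int n = 1000000;
    omp_set_num_threads(NT);

    /* padded partials: one assumed 64-byte cache line per thread */
    double part[NT][8] = {{0}};
    #pragma omp parallel
    {
        int t = omp_get_thread_num();
        #pragma omp for
        for (int i = 0; i <= n; i++) part[t][0] += i;
    }
    double sum_manual = 0;
    for (int t = 0; t < NT; t++) sum_manual += part[t][0];

    /* idiomatic reduction: no shared writes at all inside the loop */
    double sum_red = 0;
    #pragma omp parallel for reduction(+ : sum_red)
    for (int i = 0; i <= n; i++) sum_red += i;

    printf("%.0f %.0f\n", sum_manual, sum_red);  /* both equal n*(n+1)/2 */
    return 0;
}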
binbased_projection.h
// KRATOS __ __ _____ ____ _ _ ___ _ _ ____ // | \/ | ____/ ___|| | | |_ _| \ | |/ ___| // | |\/| | _| \___ \| |_| || || \| | | _ // | | | | |___ ___) | _ || || |\ | |_| | // |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Antonia Larese De Tetto // #if !defined(KRATOS_BINBASED_PROJECTION ) #define KRATOS_BINBASED_PROJECTION //External includes // System includes #include <string> #include <iostream> #include <stdlib.h> // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/timer.h" #include "meshing_application_variables.h" //Database includes #include "spatial_containers/spatial_containers.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// This class allows the interpolation between non-matching meshes in 2D and 3D. /** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu> * * This class allows the interpolation of a scalar or vectorial variable between non-matching meshes * in 2D and 3D. * * For every node of the destination model part it is checked in which element of the origin model part it is * contained and a linear interpolation is performed * * The data structure used by default is static bin. * In order to use this utility the construction of a bin of object @see BinBasedNodesInElementLocator * and a bin of nodes @see BinBasedFastPointLocator * is required at the beginning of the calculation (only ONCE). */ //class BinBasedMeshTransfer template<std::size_t TDim > class BinBasedMeshTransfer { public: ///@name Type Definitions ///@{ /// Pointer definition of BinBasedMeshTransfer KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >); /// Node type definition typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. BinBasedMeshTransfer() = default; // /// Destructor. 
virtual ~BinBasedMeshTransfer() = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ //If you want to pass the whole model part //********************************************************************** //********************************************************************** /// Interpolate the whole problem type /** * @param rOrigin_ModelPart: the model part all the variable should be taken from * @param rDestination_ModelPart: the destination model part where we want to know the values of the variables */ void DirectInterpolation( ModelPart& rOrigin_ModelPart , ModelPart& rDestination_ModelPart ) { KRATOS_TRY KRATOS_ERROR << "Not implemented yet" << std::endl; KRATOS_CATCH("") } //If you want to pass only one variable //********************************************************************** //********************************************************************** /// Interpolate one variable from the fixed mesh to the moving one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h */ // Form fixed to moving model part template<class TDataType> void DirectVariableInterpolation( ModelPart& rFixed_ModelPart , ModelPart& rMoving_ModelPart, Variable<TDataType>& rFixedDomainVariable , Variable<TDataType>& rMovingDomainVariable, BinBasedFastPointLocator<TDim>& node_locator ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl; //creating an auxiliary list for the new nodes for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rMovingDomainVariable); } Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { //Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable ); Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable ); } } KRATOS_CATCH("") } /// Map one variable from the moving mesh to the fixed one -The two meshes should be of the same dimensions otherwise better to use /// MappingFromMovingMesh_VariableMeshes that is a much generic tool. 
/** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects (elelments of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator */ // From moving to fixed model part template<class TDataType> void MappingFromMovingMesh( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl; if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { // if (node_it->IsFixed(VELOCITY_X) == false) // { // (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); // (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0; (node_it)->GetValue(YOUNG_MODULUS) = 0.0; // } } //defintions for spatial search // typedef NodeType PointType; // typedef NodeType::Pointer PointTypePointer; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { GeometryType& geom = pelement->GetGeometry(); // const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY); // const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE); const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable); for (std::size_t k = 0; k < geom.size(); k++) { geom[k].SetLock(); geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value; geom[k].GetValue(YOUNG_MODULUS) += N[k]; geom[k].UnSetLock(); } } } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { const double NN = (node_it)->GetValue(YOUNG_MODULUS); if (NN != 0.0) { (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN; } } KRATOS_CATCH("") } // From moving to fixed model part /// 
Interpolate one variable from the moving mesh to the fixed one
    /**
     * @param rFixed_ModelPart: the model part all the variable should be taken from
     * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables
     * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part
     * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part
     * @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator
     */
    template<class TDataType>
    void MappingFromMovingMesh_VariableMeshes(
        ModelPart& rMoving_ModelPart ,
        ModelPart& rFixed_ModelPart,
        Variable<TDataType>& rMovingDomainVariable ,
        Variable<TDataType>& rFixedDomainVariable,
        BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part
    )
    {
        KRATOS_TRY

        KRATOS_WATCH("Transfer From Moving Mesh*************************************")
        if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", "");
        if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false)
            KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", "");

        //creating an auxiliary list for the new nodes
        for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin();
                node_it != rFixed_ModelPart.NodesEnd(); ++node_it)
        {
            ClearVariables(node_it, rFixedDomainVariable);
        }

        //definitions for spatial search
        typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector;
        typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector;
        const std::size_t max_results = 5000;
        Matrix Nmat(max_results,TDim+1);
        boost::numeric::ublas::vector<int> positions(max_results);
        PointVector work_results(max_results);
        DistanceVector work_distances(max_results);
        Node<3> work_point(0,0.0,0.0,0.0);

        for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin();
                elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it)
        {
            std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point);
            for(std::size_t k=0; k<nfound; k++)
            {
                auto it = work_results.begin() + positions[k];
                array_1d<double,TDim+1> N = row(Nmat,k);
                Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable);
            }
        }

        KRATOS_CATCH("")
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const {}

    /// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); xc = 0.3333333333333333333*(x0+x1+x2); yc = 0.3333333333333333333*(y0+y1+y2); zc = 0.0; double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; R = 1.01 * sqrt(R); } //*************************************** //*************************************** inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); xc = 0.25*(x0+x1+x2+x3); yc = 0.25*(y0+y1+y2+y3); zc = 0.25*(z0+z1+z2+z3); double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2); double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; if(R4 > R) R = R4; R = sqrt(R); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ*0.1666666666666666666667; //return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0,y0,x1,y1,x2,y2); double inv_area = 0.0; if(area == 0.0) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero
area found",""); //The interpolated node will not be inside an element with zero area return false; } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area; N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area; N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3); double inv_vol = 0.0; if(vol < 0.0000000000001) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found",""); //The interpolated node will not be inside an element with zero volume return false; // KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol; N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol; N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol; N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } //ElemI Element iterator //N Shape functions //step_data_size //pnode pointer to the node //projecting total model part 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, int step_data_size, NodeType::Pointer pnode) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (pnode)->SolutionStepData().Data(step); double* node0_data = geom[0].SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = N[0]*node0_data[j]; } for(std::size_t k= 1; k< vector_size; k++) { double* node1_data = geom[k].SolutionStepData().Data(step); for(int j= 0; j< step_data_size; j++) { step_data[j] += N[k]*node1_data[j]; } } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting an array1D 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<array_1d<double,3> >& rOriginVariable, Variable<array_1d<double,3> >& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE step_data = N[0] *
geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting a scalar 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); //looping over the time steps: how does step_data store the data of the previous step? That is, where do we pass the information to the nodes??? for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE //copying this data in the position of the vector we are interested in step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size ) { std::size_t buffer_size = node_it->GetBufferSize(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (node_it)->SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = 0.0; } } } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable) { array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); noalias(Aux_var) = ZeroVector(3); } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable) { double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); Aux_var = 0.0; } ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther); ///@} }; // Class BinBasedMeshTransfer ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// output stream function template<std::size_t TDim> inline std::ostream& operator << (std::ostream& rOStream, const BinBasedMeshTransfer<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_BINBASED_PROJECTION defined
// KRATOS __ __ _____ ____ _ _ ___ _ _ ____ // | \/ | ____/ ___|| | | |_ _| \ | |/ ___| // | |\/| | _| \___ \| |_| || || \| | | _ // | | | | |___ ___) | _ || || |\ | |_| | // |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Antonia Larese De Tetto // #if !defined(KRATOS_BINBASED_PROJECTION ) #define KRATOS_BINBASED_PROJECTION //External includes // System includes #include <string> #include <iostream> #include <stdlib.h> // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/timer.h" #include "meshing_application_variables.h" //Database includes #include "spatial_containers/spatial_containers.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// This class allows the interpolation between non-matching meshes in 2D and 3D. /** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu> * * This class allows the interpolation of a scalar or vectorial variable between non-matching meshes * in 2D and 3D. * * For every node of the destination model part it is checked in which element of the origin model part it is * contained and a linear interpolation is performed * * The data structure used by default is a static bin. * In order to use this utility the construction of a bin of nodes @see BinBasedNodesInElementLocator * and a bin of objects @see BinBasedFastPointLocator * is required at the beginning of the calculation (only ONCE). */ //class BinBasedMeshTransfer template<std::size_t TDim > class BinBasedMeshTransfer { public: ///@name Type Definitions ///@{ /// Pointer definition of BinBasedMeshTransfer KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >); /// Node type definition typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. BinBasedMeshTransfer() = default; /// Destructor.
virtual ~BinBasedMeshTransfer() = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ //If you want to pass the whole model part //********************************************************************** //********************************************************************** /// Interpolate the whole problem type /** * @param rOrigin_ModelPart: the model part all the variable should be taken from * @param rDestination_ModelPart: the destination model part where we want to know the values of the variables */ void DirectInterpolation( ModelPart& rOrigin_ModelPart , ModelPart& rDestination_ModelPart ) { KRATOS_TRY KRATOS_ERROR << "Not implemented yet" << std::endl; KRATOS_CATCH("") } //If you want to pass only one variable //********************************************************************** //********************************************************************** /// Interpolate one variable from the fixed mesh to the moving one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h */ // From fixed to moving model part template<class TDataType> void DirectVariableInterpolation( ModelPart& rFixed_ModelPart , ModelPart& rMoving_ModelPart, Variable<TDataType>& rFixedDomainVariable , Variable<TDataType>& rMovingDomainVariable, BinBasedFastPointLocator<TDim>& node_locator ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl; //creating an auxiliary list for the new nodes for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rMovingDomainVariable); } Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { //Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable ); Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable ); } } KRATOS_CATCH("") } /// Map one variable from the moving mesh to the fixed one - The two meshes should be of the same dimensions, otherwise it is better to use /// MappingFromMovingMesh_VariableMeshes, which is a much more generic tool. /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects (elements of the fixed mesh).
It is to be constructed separately @see binbased_nodes_in_element_locator */ // From moving to fixed model part template<class TDataType> void MappingFromMovingMesh( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl; if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { // if (node_it->IsFixed(VELOCITY_X) == false) // { // (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); // (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0; (node_it)->GetValue(YOUNG_MODULUS) = 0.0; // } } //definitions for spatial search // typedef NodeType PointType; // typedef NodeType::Pointer PointTypePointer; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { GeometryType& geom = pelement->GetGeometry(); // const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY); // const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE); const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable); for (std::size_t k = 0; k < geom.size(); k++) { geom[k].SetLock(); geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value; geom[k].GetValue(YOUNG_MODULUS) += N[k]; geom[k].UnSetLock(); } } } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { const double NN = (node_it)->GetValue(YOUNG_MODULUS); if (NN != 0.0) { (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN; } } KRATOS_CATCH("") } // From moving to fixed model part /// Interpolate one variable from the moving mesh to the fixed one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of nodes of the fixed mesh.
It is to be constructed separately @see binbased_nodes_in_element_locator */ template<class TDataType> void MappingFromMovingMesh_VariableMeshes( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_WATCH("Transfer From Moving Mesh*************************************") if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } //definitions for spatial search typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector; typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector; const std::size_t max_results = 5000; Matrix Nmat(max_results,TDim+1); boost::numeric::ublas::vector<int> positions(max_results); PointVector work_results(max_results); DistanceVector work_distances(max_results); Node<3> work_point(0,0.0,0.0,0.0); for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it) { std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point); for(std::size_t k=0; k<nfound; k++) { auto it = work_results.begin() + positions[k]; array_1d<double,TDim+1> N = row(Nmat,k); Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable); } } KRATOS_CATCH("") } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); xc = 0.3333333333333333333*(x0+x1+x2); yc = 0.3333333333333333333*(y0+y1+y2); zc = 0.0; double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; R = 1.01 * sqrt(R); } //*************************************** //*************************************** inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); xc = 0.25*(x0+x1+x2+x3); yc = 0.25*(y0+y1+y2+y3); zc = 0.25*(z0+z1+z2+z3); double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2); double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; if(R4 > R) R = R4; R = sqrt(R); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ*0.1666666666666666666667; //return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0,y0,x1,y1,x2,y2); double inv_area = 0.0; if(area == 0.0) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero
area found",""); //The interpolated node will not be inside an element with zero area return false; } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area; N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area; N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3); double inv_vol = 0.0; if(vol < 0.0000000000001) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found",""); //The interpolated node will not be inside an element with zero volume return false; // KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol; N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol; N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol; N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } //ElemI Element iterator //N Shape functions //step_data_size //pnode pointer to the node //projecting total model part 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, int step_data_size, NodeType::Pointer pnode) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (pnode)->SolutionStepData().Data(step); double* node0_data = geom[0].SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = N[0]*node0_data[j]; } for(std::size_t k= 1; k< vector_size; k++) { double* node1_data = geom[k].SolutionStepData().Data(step); for(int j= 0; j< step_data_size; j++) { step_data[j] += N[k]*node1_data[j]; } } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting an array1D 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<array_1d<double,3> >& rOriginVariable, Variable<array_1d<double,3> >& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE step_data = N[0] *
geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting a scalar 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); //looping over the time steps: how does step_data store the data of the previous step? That is, where do we pass the information to the nodes??? for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE //copying this data in the position of the vector we are interested in step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size ) { std::size_t buffer_size = node_it->GetBufferSize(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (node_it)->SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = 0.0; } } } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable) { array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); noalias(Aux_var) = ZeroVector(3); } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable) { double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); Aux_var = 0.0; } ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther); ///@} }; // Class BinBasedMeshTransfer ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// output stream function template<std::size_t TDim> inline std::ostream& operator << (std::ostream& rOStream, const BinBasedMeshTransfer<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_BINBASED_PROJECTION defined
// KRATOS __ __ _____ ____ _ _ ___ _ _ ____ // | \/ | ____/ ___|| | | |_ _| \ | |/ ___| // | |\/| | _| \___ \| |_| || || \| | | _ // | | | | |___ ___) | _ || || |\ | |_| | // |_| |_|_____|____/|_| |_|___|_| \_|\____| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Antonia Larese De Tetto // #if !defined(KRATOS_BINBASED_PROJECTION ) #define KRATOS_BINBASED_PROJECTION //External includes // System includes #include <string> #include <iostream> #include <stdlib.h> // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "utilities/timer.h" #include "meshing_application_variables.h" //Database includes #include "spatial_containers/spatial_containers.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// This class allows the interpolation between non-matching meshes in 2D and 3D. /** @author Antonia Larese De Tetto <antoldt@cimne.upc.edu> * * This class allows the interpolation of a scalar or vectorial variable between non-matching meshes * in 2D and 3D. * * For every node of the destination model part it is checked in which element of the origin model part it is * contained and a linear interpolation is performed * * The data structure used by default is a static bin. * In order to use this utility the construction of a bin of nodes @see BinBasedNodesInElementLocator * and a bin of objects @see BinBasedFastPointLocator * is required at the beginning of the calculation (only ONCE). */ //class BinBasedMeshTransfer template<std::size_t TDim > class BinBasedMeshTransfer { public: ///@name Type Definitions ///@{ /// Pointer definition of BinBasedMeshTransfer KRATOS_CLASS_POINTER_DEFINITION(BinBasedMeshTransfer<TDim >); /// Node type definition typedef Node<3> NodeType; typedef Geometry<NodeType> GeometryType; ///@} ///@name Life Cycle ///@{ /// Default constructor. BinBasedMeshTransfer() = default; /// Destructor.
virtual ~BinBasedMeshTransfer() = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ //If you want to pass the whole model part //********************************************************************** //********************************************************************** /// Interpolate the whole problem type /** * @param rOrigin_ModelPart: the model part all the variable should be taken from * @param rDestination_ModelPart: the destination model part where we want to know the values of the variables */ void DirectInterpolation( ModelPart& rOrigin_ModelPart , ModelPart& rDestination_ModelPart ) { KRATOS_TRY KRATOS_ERROR << "Not implemented yet" << std::endl; KRATOS_CATCH("") } //If you want to pass only one variable //********************************************************************** //********************************************************************** /// Interpolate one variable from the fixed mesh to the moving one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects. It is to be constructed separately @see binbased_fast_point_locator.h */ // From fixed to moving model part template<class TDataType> void DirectVariableInterpolation( ModelPart& rFixed_ModelPart , ModelPart& rMoving_ModelPart, Variable<TDataType>& rFixedDomainVariable , Variable<TDataType>& rMovingDomainVariable, BinBasedFastPointLocator<TDim>& node_locator ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Interpolate From Fixed Mesh*************************************" << std::endl; //creating an auxiliary list for the new nodes for(auto node_it = rMoving_ModelPart.NodesBegin(); node_it != rMoving_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rMovingDomainVariable); } Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { //Interpolate( ElemIt, N, *it_found , rFixedDomainVariable , rMovingDomainVariable ); Interpolate( pelement, N, pparticle, rFixedDomainVariable , rMovingDomainVariable ); } } KRATOS_CATCH("") } /// Map one variable from the moving mesh to the fixed one - The two meshes should be of the same dimensions, otherwise it is better to use /// MappingFromMovingMesh_VariableMeshes, which is a much more generic tool.
/** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of objects (elements of the fixed mesh). It is to be constructed separately @see binbased_nodes_in_element_locator */ // From moving to fixed model part template<class TDataType> void MappingFromMovingMesh( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedFastPointLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_INFO("BinBasedMeshTransfer") << "Transfer From Moving Mesh*************************************" << std::endl; if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { // if (node_it->IsFixed(VELOCITY_X) == false) // { // (node_it)->FastGetSolutionStepValue(VELOCITY) = ZeroVector(3); // (node_it)->FastGetSolutionStepValue(TEMPERATURE) = 0.0; (node_it)->GetValue(YOUNG_MODULUS) = 0.0; // } } //definitions for spatial search // typedef NodeType PointType; // typedef NodeType::Pointer PointTypePointer; Vector N(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rMoving_ModelPart.Nodes().size(); #pragma omp parallel for firstprivate(results,N) for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rMoving_ModelPart.NodesBegin() + i; NodeType::Pointer pparticle = *(iparticle.base()); auto result_begin = results.begin(); Element::Pointer pelement; bool is_found = node_locator.FindPointOnMesh(pparticle->Coordinates(), N, pelement, result_begin, max_results); if (is_found == true) { GeometryType& geom = pelement->GetGeometry(); // const array_1d<double, 3 > & vel_particle = (iparticle)->FastGetSolutionStepValue(VELOCITY); // const double& temperature_particle = (iparticle)->FastGetSolutionStepValue(TEMPERATURE); const TDataType& value = (iparticle)->FastGetSolutionStepValue(rMovingDomainVariable); for (std::size_t k = 0; k < geom.size(); k++) { geom[k].SetLock(); geom[k].FastGetSolutionStepValue(rFixedDomainVariable) += N[k] * value; geom[k].GetValue(YOUNG_MODULUS) += N[k]; geom[k].UnSetLock(); } } } for (ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); node_it++) { const double NN = (node_it)->GetValue(YOUNG_MODULUS); if (NN != 0.0) { (node_it)->FastGetSolutionStepValue(rFixedDomainVariable) /= NN; } } KRATOS_CATCH("") } // From moving to fixed model part ///
Interpolate one variable from the moving mesh to the fixed one /** * @param rFixed_ModelPart: the model part all the variable should be taken from * @param rMoving_ModelPart: the destination model part where we want to know the values of the variables * @param rFixedDomainVariable: the name of the interpolated variable in the origin model part * @param rMovingDomainVariable: the name of the interpolated variable in the destination model part * @param node_locator: precomputed bin of nodes of the fixed mesh. It is to be constructed separately @see binbased_nodes_in_element_locator */ template<class TDataType> void MappingFromMovingMesh_VariableMeshes( ModelPart& rMoving_ModelPart , ModelPart& rFixed_ModelPart, Variable<TDataType>& rMovingDomainVariable , Variable<TDataType>& rFixedDomainVariable, BinBasedNodesInElementLocator<TDim>& node_locator //this is a bin of objects which contains the FIXED model part ) { KRATOS_TRY KRATOS_WATCH("Transfer From Moving Mesh*************************************") if (rMoving_ModelPart.NodesBegin()->SolutionStepsDataHas(rMovingDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add MovingDomain VARIABLE!!!!!! ERROR", ""); if (rFixed_ModelPart.NodesBegin()->SolutionStepsDataHas(rFixedDomainVariable) == false) KRATOS_THROW_ERROR(std::logic_error, "Add FixedDomain VARIABLE!!!!!! ERROR", ""); //creating an auxiliary list for the new nodes for(ModelPart::NodesContainerType::iterator node_it = rFixed_ModelPart.NodesBegin(); node_it != rFixed_ModelPart.NodesEnd(); ++node_it) { ClearVariables(node_it, rFixedDomainVariable); } //definitions for spatial search typedef typename BinBasedNodesInElementLocator<TDim>::PointVector PointVector; typedef typename BinBasedNodesInElementLocator<TDim>::DistanceVector DistanceVector; const std::size_t max_results = 5000; Matrix Nmat(max_results,TDim+1); boost::numeric::ublas::vector<int> positions(max_results); PointVector work_results(max_results); DistanceVector work_distances(max_results); Node<3> work_point(0,0.0,0.0,0.0); for(ModelPart::ElementsContainerType::iterator elem_it = rMoving_ModelPart.ElementsBegin(); elem_it != rMoving_ModelPart.ElementsEnd(); ++elem_it) { std::size_t nfound = node_locator.FindNodesInElement(*(elem_it.base()), positions, Nmat, max_results, work_results.begin(), work_distances.begin(), work_point); for(std::size_t k=0; k<nfound; k++) { auto it = work_results.begin() + positions[k]; array_1d<double,TDim+1> N = row(Nmat,k); Interpolate( *(elem_it.base()), N, *it, rMovingDomainVariable , rFixedDomainVariable); } } KRATOS_CATCH("") } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data.
virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); xc = 0.3333333333333333333*(x0+x1+x2); yc = 0.3333333333333333333*(y0+y1+y2); zc = 0.0; double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; R = 1.01 * sqrt(R); } //*************************************** //*************************************** inline void CalculateCenterAndSearchRadius(GeometryType& geom, double& xc, double& yc, double& zc, double& R, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); xc = 0.25*(x0+x1+x2+x3); yc = 0.25*(y0+y1+y2+y3); zc = 0.25*(z0+z1+z2+z3); double R1 = (xc-x0)*(xc-x0) + (yc-y0)*(yc-y0) + (zc-z0)*(zc-z0); double R2 = (xc-x1)*(xc-x1) + (yc-y1)*(yc-y1) + (zc-z1)*(zc-z1); double R3 = (xc-x2)*(xc-x2) + (yc-y2)*(yc-y2) + (zc-z2)*(zc-z2); double R4 = (xc-x3)*(xc-x3) + (yc-y3)*(yc-y3) + (zc-z3)*(zc-z3); R = R1; if(R2 > R) R = R2; if(R3 > R) R = R3; if(R4 > R) R = R4; R = sqrt(R); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double x1, const double y1, const double x2, const double y2 ) { return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline double CalculateVol( const double x0, const double y0, const double z0, const double x1, const double y1, const double z1, const double x2, const double y2, const double z2, const double x3, const double y3, const double z3 ) { double x10 = x1 - x0; double y10 = y1 - y0; double z10 = z1 - z0; double x20 = x2 - x0; double y20 = y2 - y0; double z20 = z2 - z0; double x30 = x3 - x0; double y30 = y3 - y0; double z30 = z3 - z0; double detJ = x10 * y20 * z30 - x10 * y30 * z20 + y10 * z20 * x30 - y10 * x20 * z30 + z10 * x20 * y30 - z10 * y20 * x30; return detJ*0.1666666666666666666667; //return 0.5*( (x1-x0)*(y2-y0)- (y1-y0)*(x2-x0) ); } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,3>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double area = CalculateVol(x0,y0,x1,y1,x2,y2); double inv_area = 0.0; if(area == 0.0) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero
area found",""); //The interpolated node will not be inside an element with zero area return false; } else { inv_area = 1.0 / area; } N[0] = CalculateVol(x1,y1,x2,y2,xc,yc) * inv_area; N[1] = CalculateVol(x2,y2,x0,y0,xc,yc) * inv_area; N[2] = CalculateVol(x0,y0,x1,y1,xc,yc) * inv_area; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[0] <=1.0 && N[1]<= 1.0 && N[2] <= 1.0) //if the xc yc is inside the triangle return true return true; return false; } //*************************************** //*************************************** inline bool CalculatePosition( GeometryType& geom, const double xc, const double yc, const double zc, array_1d<double,4>& N ) { double x0 = geom[0].X(); double y0 = geom[0].Y(); double z0 = geom[0].Z(); double x1 = geom[1].X(); double y1 = geom[1].Y(); double z1 = geom[1].Z(); double x2 = geom[2].X(); double y2 = geom[2].Y(); double z2 = geom[2].Z(); double x3 = geom[3].X(); double y3 = geom[3].Y(); double z3 = geom[3].Z(); double vol = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3); double inv_vol = 0.0; if(vol < 0.0000000000001) { // KRATOS_THROW_ERROR(std::logic_error,"element with zero vol found",""); //The interpolated node will not be inside an element with zero volume return false; // KRATOS_WATCH("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") } else { inv_vol = 1.0 / vol; } N[0] = CalculateVol(x1,y1,z1,x3,y3,z3,x2,y2,z2,xc,yc,zc) * inv_vol; N[1] = CalculateVol(x3,y3,z3,x0,y0,z0,x2,y2,z2,xc,yc,zc) * inv_vol; N[2] = CalculateVol(x3,y3,z3,x1,y1,z1,x0,y0,z0,xc,yc,zc) * inv_vol; N[3] = CalculateVol(x0,y0,z0,x1,y1,z1,x2,y2,z2,xc,yc,zc) * inv_vol; if(N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >=0.0 && N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <=1.0) //if the xc yc zc is inside the tetrahedron return true return true; return false; } //ElemI Element iterator //N Shape functions //step_data_size //pnode pointer to the node //projecting total model part 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, int step_data_size, NodeType::Pointer pnode) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (pnode)->SolutionStepData().Data(step); double* node0_data = geom[0].SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = N[0]*node0_data[j]; } for(std::size_t k= 1; k< vector_size; k++) { double* node1_data = geom[k].SolutionStepData().Data(step); for(int j= 0; j< step_data_size; j++) { step_data[j] += N[k]*node1_data[j]; } } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting an array1D 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<array_1d<double,3> >& rOriginVariable, Variable<array_1d<double,3> >& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step array_1d<double,3>& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE step_data = N[0] *
geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const array_1d<double,3>& node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } //projecting a scalar 2Dversion void Interpolate( Element::Pointer ElemIt, const Vector& N, NodeType::Pointer pnode, Variable<double>& rOriginVariable, Variable<double>& rDestinationVariable) { //Geometry element of the rOrigin_ModelPart GeometryType& geom = ElemIt->GetGeometry(); const std::size_t buffer_size = pnode->GetBufferSize(); const std::size_t vector_size = N.size(); //looping over the time steps: how does step_data store the data of the previous step? That is, where do we pass the information to the nodes??? for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double& step_data = (pnode)->FastGetSolutionStepValue(rDestinationVariable , step); //Reference or no reference??? //DELETE //copying this data in the position of the vector we are interested in step_data = N[0] * geom[0].FastGetSolutionStepValue(rOriginVariable , step); // Copying this data in the position of the vector we are interested in for(std::size_t j= 1; j< vector_size; j++) { const double node_data = geom[j].FastGetSolutionStepValue(rOriginVariable , step); step_data += N[j] * node_data; } } // pnode->GetValue(IS_VISITED) = 1.0; } inline void Clear(ModelPart::NodesContainerType::iterator node_it, int step_data_size ) { std::size_t buffer_size = node_it->GetBufferSize(); for(std::size_t step = 0; step<buffer_size; step++) { //getting the data of the solution step double* step_data = (node_it)->SolutionStepData().Data(step); //copying this data in the position of the vector we are interested in for(int j= 0; j< step_data_size; j++) { step_data[j] = 0.0; } } } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it , Variable<array_1d<double,3> >& rVariable) { array_1d<double, 3>& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); noalias(Aux_var) = ZeroVector(3); } inline void ClearVariables(ModelPart::NodesContainerType::iterator node_it, Variable<double>& rVariable) { double& Aux_var = node_it->FastGetSolutionStepValue(rVariable, 0); Aux_var = 0.0; } ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. BinBasedMeshTransfer& operator=(BinBasedMeshTransfer const& rOther); ///@} }; // Class BinBasedMeshTransfer ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// output stream function template<std::size_t TDim> inline std::ostream& operator << (std::ostream& rOStream, const BinBasedMeshTransfer<TDim>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_BINBASED_PROJECTION defined
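A minimal driver sketch for the header above (my addition, not part of the dataset row): the class documentation requires the locator to be built once before any transfer, and DirectVariableInterpolation then maps a nodal variable from the fixed onto the moving mesh. The function name, the 2D setup, the include path of the header, and the use of the standard Kratos TEMPERATURE variable are assumptions for illustration only.

// Hypothetical usage sketch, assuming a 2D problem and that both model parts
// already carry TEMPERATURE in their nodal solution-step data.
#include "includes/model_part.h"
#include "includes/variables.h" // TEMPERATURE
#include "utilities/binbased_fast_point_locator.h"
// #include "custom_utilities/binbased_projection.h" // the header above (path assumed)

void TransferTemperature(Kratos::ModelPart& rFixedModelPart, Kratos::ModelPart& rMovingModelPart)
{
    // Build the bin of elements of the fixed mesh (required only ONCE per mesh change)
    Kratos::BinBasedFastPointLocator<2> point_locator(rFixedModelPart);
    point_locator.UpdateSearchDatabase();

    // Interpolate TEMPERATURE from the fixed mesh onto the nodes of the moving mesh
    Kratos::BinBasedMeshTransfer<2> mesh_transfer;
    mesh_transfer.DirectVariableInterpolation(rFixedModelPart, rMovingModelPart,
                                              Kratos::TEMPERATURE, Kratos::TEMPERATURE,
                                              point_locator);
}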
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc < 5) { fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]); return 1; } Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causing performance degradation) /* for(i=0; i<Nz;
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc < 5) { fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]); return 1; } Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causing performance degradation) /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef _OPENMP #include <omp.h> #endif #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc < 5) { fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]); return 1; } Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (causing performance degradation) /* for(i=0; i<Nz;
i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
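Note that both formatted variants above thread only the LIKWID marker regions; the stencil sweep itself stays serial. Below is a minimal sketch of how the spatial loops of one Jacobi-style time step could be shared among threads. It assumes a flat-array layout rather than the benchmark's quadruple-pointer layout, and the names stencil_step and IDX are invented for this illustration.

/* One time step of the order-1 7-point stencil with the i/j/k loops
 * parallelized. Every (i,j,k) writes a distinct out[] element, so the
 * iterations are independent and need no synchronization. */
#include <stdio.h>
#include <stdlib.h>

#define IDX(i, j, k, Ny, Nx) (((size_t)(i) * (Ny) + (j)) * (Nx) + (k))

static void stencil_step(const double *in, double *out,
                         int Nz, int Ny, int Nx,
                         double alpha, double beta)
{
    int i, j, k;
    /* j and k are declared outside the loop nest, so they must be
     * privatized explicitly; i, as the parallelized loop variable,
     * is private automatically. */
#pragma omp parallel for private(j, k)
    for (i = 1; i < Nz - 1; i++) {
        for (j = 1; j < Ny - 1; j++) {
            for (k = 1; k < Nx - 1; k++) {
                out[IDX(i, j, k, Ny, Nx)] =
                    alpha * in[IDX(i, j, k, Ny, Nx)] +
                    beta * (in[IDX(i - 1, j, k, Ny, Nx)] + in[IDX(i + 1, j, k, Ny, Nx)] +
                            in[IDX(i, j - 1, k, Ny, Nx)] + in[IDX(i, j + 1, k, Ny, Nx)] +
                            in[IDX(i, j, k - 1, Ny, Nx)] + in[IDX(i, j, k + 1, Ny, Nx)]);
            }
        }
    }
}

int main(void)
{
    const int N = 34; /* 32 interior points plus 2 halo layers, mirroring the Nx+2 above */
    double *a = calloc((size_t)N * N * N, sizeof(double));
    double *b = calloc((size_t)N * N * N, sizeof(double));
    if (!a || !b) return 1;
    a[IDX(N / 2, N / 2, N / 2, N, N)] = 1.0;
    stencil_step(a, b, N, N, N, 0.0876, 0.0765);
    printf("center neighbor: %f\n", b[IDX(N / 2 + 1, N / 2, N / 2, N, N)]); /* beta * 1.0 */
    free(a);
    free(b);
    return 0;
}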
blas.c
#include "blas.h" #include "utils.h" #include <math.h> #include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <string.h> void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out) { int b,i,j,k; int in_c = out_c/(stride*stride); //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward); //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride); for(b = 0; b < batch; ++b){ for(k = 0; k < out_c; ++k){ for(j = 0; j < out_h; ++j){ for(i = 0; i < out_w; ++i){ int in_index = i + out_w*(j + out_h*(k + out_c*b)); int c2 = k % in_c; int offset = k / in_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b)); if(forward) out[out_index] = x[in_index]; // used by default for forward (i.e. forward = 0) else out[in_index] = x[out_index]; } } } } } void flatten(float *x, int size, int layers, int batch, int forward) { float* swap = (float*)xcalloc(size * layers * batch, sizeof(float)); int i,c,b; for(b = 0; b < batch; ++b){ for(c = 0; c < layers; ++c){ for(i = 0; i < size; ++i){ int i1 = b*layers*size + c*size + i; int i2 = b*layers*size + i*layers + c; if (forward) swap[i2] = x[i1]; else swap[i1] = x[i2]; } } } memcpy(x, swap, size*layers*batch*sizeof(float)); free(swap); } void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c) { int i; for(i = 0; i < n; ++i){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc) { int i; for(i = 0; i < n; ++i){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } static float relu(float src) { if (src > 0) return src; return 0; } void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float sum = 1, max_val = -FLT_MAX; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[id] = in[id] * w; // [0 or c or (c, h ,w)] } else out[id] = in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = 
add_outputs*src_b + src_i; int out_index = id; float *add = layers_output[i]; if (weights) { const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)] } else out[out_index] += add[add_index]; } } } } void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float grad = 1, sum = 1, max_val = -FLT_MAX;; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } grad = 0; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float delta_w = delta_in[id] * in[id]; const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum; } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[src_i / step] += delta_in[id] * in[id] * grad; } else delta_out[id] += delta_in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = add_outputs*src_b + src_i; int out_index = id; float *layer_delta = layers_delta[i]; if (weights) { float *add = layers_output[i]; const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[weights_index] += delta_in[id] * add[add_index] * grad; } else layer_delta[add_index] += delta_in[id]; } } } } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < w2) ? 
w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } } } } } void mean_cpu(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i,j,k; for(i = 0; i < filters; ++i){ mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } } void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int i,j,k; for(i = 0; i < filters; ++i){ variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f); } } } } void const_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void mul_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX]; } void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA); } void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX]; } void scal_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA; } void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX) { int i; for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; if (INCX == 1 && ALPHA == 0) { memset(X, 0, N * sizeof(float)); } else { for (i = 0; i < N; ++i) X[i*INCX] = ALPHA; } } void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ if(X) X[j*NX + i] += OUT[index]; ++index; } for(i = 0; i < NY; ++i){ if(Y) Y[j*NY + i] += OUT[index]; ++index; } } } void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ OUT[index++] = X[j*NX + i]; } for(i = 0; i < NY; ++i){ OUT[index++] = Y[j*NY + i]; } } } void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void mult_add_into_cpu(int N, float *X, float *Y, float *Z) { int i; for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i]; } void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; float abs_val = fabs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } void l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = fabs(diff); delta[i] = diff > 0 ? 
1 : -1; } } void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p) - (1-t)*log(1-p); delta[i] = t-p; } } void l2_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = diff * diff; delta[i] = diff; } } float dot_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; float dot = 0; for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY]; return dot; } void softmax(float *input, int n, float temp, float *output, int stride) { int i; float sum = 0; float largest = -FLT_MAX; for(i = 0; i < n; ++i){ if(input[i*stride] > largest) largest = input[i*stride]; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int g, b; for(b = 0; b < batch; ++b){ for(g = 0; g < groups; ++g){ softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride); } } } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < c; ++k) { for (j = 0; j < h*stride; ++j) { for (i = 0; i < w*stride; ++i) { int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if (forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void constrain_cpu(int size, float ALPHA, float *X) { int i; for (i = 0; i < size; ++i) { X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i])); } } void fix_nan_and_inf_cpu(float *input, size_t size) { int i; for (i = 0; i < size; ++i) { float val = input[i]; if (isnan(val) || isinf(val)) input[i] = 1.0f / i; // pseudo random value } }
#include "blas.h" #include "utils.h" #include <math.h> #include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <string.h> void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out) { int b,i,j,k; int in_c = out_c/(stride*stride); //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward); //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride); for(b = 0; b < batch; ++b){ for(k = 0; k < out_c; ++k){ for(j = 0; j < out_h; ++j){ for(i = 0; i < out_w; ++i){ int in_index = i + out_w*(j + out_h*(k + out_c*b)); int c2 = k % in_c; int offset = k / in_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b)); if(forward) out[out_index] = x[in_index]; // used by default for forward (i.e. forward = 0) else out[in_index] = x[out_index]; } } } } } void flatten(float *x, int size, int layers, int batch, int forward) { float* swap = (float*)xcalloc(size * layers * batch, sizeof(float)); int i,c,b; for(b = 0; b < batch; ++b){ for(c = 0; c < layers; ++c){ for(i = 0; i < size; ++i){ int i1 = b*layers*size + c*size + i; int i2 = b*layers*size + i*layers + c; if (forward) swap[i2] = x[i1]; else swap[i1] = x[i2]; } } } memcpy(x, swap, size*layers*batch*sizeof(float)); free(swap); } void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c) { int i; for(i = 0; i < n; ++i){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc) { int i; for(i = 0; i < n; ++i){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } static float relu(float src) { if (src > 0) return src; return 0; } void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; for (id = 0; id < size; ++id) { int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float sum = 1, max_val = -FLT_MAX; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[id] = in[id] * w; // [0 or c or (c, h ,w)] } else out[id] = in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = add_outputs*src_b + src_i; int 
out_index = id; float *add = layers_output[i]; if (weights) { const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)] } else out[out_index] += add[add_index]; } } } } void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; for (id = 0; id < size; ++id) { int src_id = id; int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float grad = 1, sum = 1, max_val = -FLT_MAX;; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } grad = 0; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float delta_w = delta_in[id] * in[id]; const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum; } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[src_i / step] += delta_in[id] * in[id] * grad; } else delta_out[id] += delta_in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = add_outputs*src_b + src_i; int out_index = id; float *layer_delta = layers_delta[i]; if (weights) { float *add = layers_output[i]; const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[weights_index] += delta_in[id] * add[add_index] * grad; } else layer_delta[add_index] += delta_in[id]; } } } } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < w2) ? w1 : w2; int minh = (h1 < h2) ? 
h1 : h2; int minc = (c1 < c2) ? c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } } } } } void mean_cpu(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i,j,k; for(i = 0; i < filters; ++i){ mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } } void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int i,j,k; for(i = 0; i < filters; ++i){ variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f); } } } } void const_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void mul_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX]; } void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA); } void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX]; } void scal_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA; } void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX) { int i; for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; if (INCX == 1 && ALPHA == 0) { memset(X, 0, N * sizeof(float)); } else { for (i = 0; i < N; ++i) X[i*INCX] = ALPHA; } } void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ if(X) X[j*NX + i] += OUT[index]; ++index; } for(i = 0; i < NY; ++i){ if(Y) Y[j*NY + i] += OUT[index]; ++index; } } } void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ OUT[index++] = X[j*NX + i]; } for(i = 0; i < NY; ++i){ OUT[index++] = Y[j*NY + i]; } } } void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void mult_add_into_cpu(int N, float *X, float *Y, float *Z) { int i; for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i]; } void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; float abs_val = fabs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } void l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = fabs(diff); delta[i] = diff > 0 ? 
1 : -1; } } void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p) - (1-t)*log(1-p); delta[i] = t-p; } } void l2_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = diff * diff; delta[i] = diff; } } float dot_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; float dot = 0; for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY]; return dot; } void softmax(float *input, int n, float temp, float *output, int stride) { int i; float sum = 0; float largest = -FLT_MAX; for(i = 0; i < n; ++i){ if(input[i*stride] > largest) largest = input[i*stride]; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int g, b; for(b = 0; b < batch; ++b){ for(g = 0; g < groups; ++g){ softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride); } } } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < c; ++k) { for (j = 0; j < h*stride; ++j) { for (i = 0; i < w*stride; ++i) { int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if (forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void constrain_cpu(int size, float ALPHA, float *X) { int i; for (i = 0; i < size; ++i) { X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i])); } } void fix_nan_and_inf_cpu(float *input, size_t size) { int i; for (i = 0; i < size; ++i) { float val = input[i]; if (isnan(val) || isinf(val)) input[i] = 1.0f / i; // pseudo random value } }
#include "blas.h" #include "utils.h" #include <math.h> #include <assert.h> #include <float.h> #include <stdio.h> #include <stdlib.h> #include <string.h> void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out) { int b,i,j,k; int in_c = out_c/(stride*stride); //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward); //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride); for(b = 0; b < batch; ++b){ for(k = 0; k < out_c; ++k){ for(j = 0; j < out_h; ++j){ for(i = 0; i < out_w; ++i){ int in_index = i + out_w*(j + out_h*(k + out_c*b)); int c2 = k % in_c; int offset = k / in_c; int w2 = i*stride + offset % stride; int h2 = j*stride + offset / stride; int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b)); if(forward) out[out_index] = x[in_index]; // used by default for forward (i.e. forward = 0) else out[in_index] = x[out_index]; } } } } } void flatten(float *x, int size, int layers, int batch, int forward) { float* swap = (float*)xcalloc(size * layers * batch, sizeof(float)); int i,c,b; for(b = 0; b < batch; ++b){ for(c = 0; c < layers; ++c){ for(i = 0; i < size; ++i){ int i1 = b*layers*size + c*size + i; int i2 = b*layers*size + i*layers + c; if (forward) swap[i2] = x[i1]; else swap[i1] = x[i2]; } } } memcpy(x, swap, size*layers*batch*sizeof(float)); free(swap); } void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c) { int i; for(i = 0; i < n; ++i){ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0); } } void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc) { int i; for(i = 0; i < n; ++i){ if(da) da[i] += dc[i] * s[i]; if(db) db[i] += dc[i] * (1-s[i]); ds[i] += dc[i] * (a[i] - b[i]); } } static float relu(float src) { if (src > 0) return src; return 0; } void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; const int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float sum = 1, max_val = -FLT_MAX; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[id] = in[id] * w; // [0 or c or (c, h ,w)] } else out[id] = in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = 
add_outputs*src_b + src_i; int out_index = id; float *add = layers_output[i]; if (weights) { const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)] } else out[out_index] += add[add_index]; } } } } void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalizion) { // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w) const int layer_step = nweights / (n + 1); // 1 or l.c or (l.c * l.h * l.w) int step = 0; if (nweights > 0) step = src_outputs / layer_step; // (l.c * l.h * l.w) or (l.w*l.h) or 1 int id; #pragma omp parallel for for (id = 0; id < size; ++id) { int src_id = id; int src_i = src_id % src_outputs; src_id /= src_outputs; int src_b = src_id; float grad = 1, sum = 1, max_val = -FLT_MAX;; int i; if (weights && weights_normalizion) { if (weights_normalizion == SOFTMAX_NORMALIZATION) { for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (max_val < w) max_val = w; } } const float eps = 0.0001; sum = eps; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) sum += relu(w); else if (weights_normalizion == SOFTMAX_NORMALIZATION) sum += expf(w - max_val); } grad = 0; for (i = 0; i < (n + 1); ++i) { const int weights_index = src_i / step + i*layer_step; // [0 or c or (c, h ,w)] const float delta_w = delta_in[id] * in[id]; const float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum; } } if (weights) { float w = weights[src_i / step]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[src_i / step] += delta_in[id] * in[id] * grad; } else delta_out[id] += delta_in[id]; // layers for (i = 0; i < n; ++i) { int add_outputs = outputs_of_layers[i]; if (src_i < add_outputs) { int add_index = add_outputs*src_b + src_i; int out_index = id; float *layer_delta = layers_delta[i]; if (weights) { float *add = layers_output[i]; const int weights_index = src_i / step + (i + 1)*layer_step; // [0 or c or (c, h ,w)] float w = weights[weights_index]; if (weights_normalizion == RELU_NORMALIZATION) w = relu(w) / sum; else if (weights_normalizion == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum; layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)] weight_updates[weights_index] += delta_in[id] * add[add_index] * grad; } else layer_delta[add_index] += delta_in[id]; } } } } void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out) { int stride = w1/w2; int sample = w2/w1; assert(stride == h1/h2); assert(sample == h2/h1); if(stride < 1) stride = 1; if(sample < 1) sample = 1; int minw = (w1 < w2) ? 
w1 : w2; int minh = (h1 < h2) ? h1 : h2; int minc = (c1 < c2) ? c1 : c2; int i,j,k,b; for(b = 0; b < batch; ++b){ for(k = 0; k < minc; ++k){ for(j = 0; j < minh; ++j){ for(i = 0; i < minw; ++i){ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b)); int add_index = i*stride + w1*(j*stride + h1*(k + c1*b)); out[out_index] += add[add_index]; } } } } } void mean_cpu(float *x, int batch, int filters, int spatial, float *mean) { float scale = 1./(batch * spatial); int i,j,k; for(i = 0; i < filters; ++i){ mean[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; mean[i] += x[index]; } } mean[i] *= scale; } } void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance) { float scale = 1./(batch * spatial - 1); int i,j,k; for(i = 0; i < filters; ++i){ variance[i] = 0; for(j = 0; j < batch; ++j){ for(k = 0; k < spatial; ++k){ int index = j*filters*spatial + i*spatial + k; variance[i] += pow((x[index] - mean[i]), 2); } } variance[i] *= scale; } } void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial) { int b, f, i; for(b = 0; b < batch; ++b){ for(f = 0; f < filters; ++f){ for(i = 0; i < spatial; ++i){ int index = b*filters*spatial + f*spatial + i; x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f); } } } } void const_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] = ALPHA; } void mul_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX]; } void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA); } void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX]; } void scal_cpu(int N, float ALPHA, float *X, int INCX) { int i; for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA; } void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX) { int i; for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA; } void fill_cpu(int N, float ALPHA, float *X, int INCX) { int i; if (INCX == 1 && ALPHA == 0) { memset(X, 0, N * sizeof(float)); } else { for (i = 0; i < N; ++i) X[i*INCX] = ALPHA; } } void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ if(X) X[j*NX + i] += OUT[index]; ++index; } for(i = 0; i < NY; ++i){ if(Y) Y[j*NY + i] += OUT[index]; ++index; } } } void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT) { int i, j; int index = 0; for(j = 0; j < B; ++j) { for(i = 0; i < NX; ++i){ OUT[index++] = X[j*NX + i]; } for(i = 0; i < NY; ++i){ OUT[index++] = Y[j*NY + i]; } } } void copy_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX]; } void mult_add_into_cpu(int N, float *X, float *Y, float *Z) { int i; for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i]; } void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; float abs_val = fabs(diff); if(abs_val < 1) { error[i] = diff * diff; delta[i] = diff; } else { error[i] = 2*abs_val - 1; delta[i] = (diff > 0) ? 1 : -1; } } } void l1_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = fabs(diff); delta[i] = diff > 0 ? 
1 : -1; } } void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = (t) ? -log(p) : 0; delta[i] = t-p; } } void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float t = truth[i]; float p = pred[i]; error[i] = -t*log(p) - (1-t)*log(1-p); delta[i] = t-p; } } void l2_cpu(int n, float *pred, float *truth, float *delta, float *error) { int i; for(i = 0; i < n; ++i){ float diff = truth[i] - pred[i]; error[i] = diff * diff; delta[i] = diff; } } float dot_cpu(int N, float *X, int INCX, float *Y, int INCY) { int i; float dot = 0; for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY]; return dot; } void softmax(float *input, int n, float temp, float *output, int stride) { int i; float sum = 0; float largest = -FLT_MAX; for(i = 0; i < n; ++i){ if(input[i*stride] > largest) largest = input[i*stride]; } for(i = 0; i < n; ++i){ float e = exp(input[i*stride]/temp - largest/temp); sum += e; output[i*stride] = e; } for(i = 0; i < n; ++i){ output[i*stride] /= sum; } } void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output) { int g, b; for(b = 0; b < batch; ++b){ for(g = 0; g < groups; ++g){ softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride); } } } void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { int i, j, k, b; for (b = 0; b < batch; ++b) { for (k = 0; k < c; ++k) { for (j = 0; j < h*stride; ++j) { for (i = 0; i < w*stride; ++i) { int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride; int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i; if (forward) out[out_index] = scale*in[in_index]; else in[in_index] += scale*out[out_index]; } } } } } void constrain_cpu(int size, float ALPHA, float *X) { int i; for (i = 0; i < size; ++i) { X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i])); } } void fix_nan_and_inf_cpu(float *input, size_t size) { int i; for (i = 0; i < size; ++i) { float val = input[i]; if (isnan(val) || isinf(val)) input[i] = 1.0f / i; // pseudo random value } }
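The weighted shortcut kernels above normalize the (n + 1) per-layer weights either with ReLU or with a max-shifted softmax seeded by a small eps. The following standalone sketch distills just that softmax normalization from shortcut_multilayer_cpu(); the function name normalize_shortcut_weights is invented for the sketch, and it uses the same eps value as the kernels.

/* Max-shifted softmax over a small weight vector, as used per output
 * element in shortcut_multilayer_cpu(). Subtracting the maximum before
 * exponentiating keeps expf() from overflowing; the eps seed keeps the
 * denominator nonzero even when every shifted exponential underflows. */
#include <float.h>
#include <math.h>
#include <stdio.h>

static void normalize_shortcut_weights(const float *w, float *out, int count)
{
    const float eps = 0.0001f;
    float max_val = -FLT_MAX;
    float sum = eps;
    int i;
    for (i = 0; i < count; ++i)
        if (w[i] > max_val) max_val = w[i];
    for (i = 0; i < count; ++i)
        sum += expf(w[i] - max_val); /* shifted exponentials are <= 1 */
    for (i = 0; i < count; ++i)
        out[i] = expf(w[i] - max_val) / sum;
}

int main(void)
{
    float w[3] = { 0.5f, 2.0f, -1.0f };
    float norm[3];
    normalize_shortcut_weights(w, norm, 3);
    printf("%f %f %f\n", norm[0], norm[1], norm[2]); /* sums to ~1 */
    return 0;
}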
simpf.c
/* Start reading here */ #include <fftw3.h> #define NUM_POINTS 64 /* Never mind this bit */ #include <stdio.h> #include <math.h> #define REAL 0 #define IMAG 1 float theta; void acquire_from_somewhere(fftwf_complex* signal) { /* Generate two sine waves of different frequencies and * * amplitudes. * */ int i; #pragma omp for private(theta) for (i = 0; i < NUM_POINTS; ++i) { theta = (float)i / (float)NUM_POINTS * M_PI; signal[i][REAL] = 1.0 * cos(4.0 * theta) + 0.5 * cos( 8.0 * theta); signal[i][IMAG] = 1.0 * sin(2.0 * theta) + 0.5 * sin(16.0 * theta); signal[i][IMAG] = 1.0 * cos(2.0 * theta) + 0.5 * cos(16.0 * theta); // signal[i][REAL]=i; // signal[i][IMAG]=0; } } void do_something_with(fftwf_complex* result) { int i; for (i = 0; i < NUM_POINTS; ++i) { float mag = sqrt(result[i][REAL] * result[i][REAL] + result[i][IMAG] * result[i][IMAG]); printf("%23.12f %10.5f %10.5f\n", mag,result[i][REAL] ,result[i][IMAG]); } } /* Resume reading here */ int main() { fftwf_complex signal[NUM_POINTS]; fftwf_complex result[NUM_POINTS]; fftwf_plan plan = fftwf_plan_dft_1d(NUM_POINTS, signal, result, FFTW_FORWARD, FFTW_ESTIMATE); acquire_from_somewhere(signal); fftwf_execute(plan); do_something_with(result); fftwf_destroy_plan(plan); return 0; }
/* Start reading here */ #include <fftw3.h> #define NUM_POINTS 64 /* Never mind this bit */ #include <stdio.h> #include <math.h> #define REAL 0 #define IMAG 1 float theta; void acquire_from_somewhere(fftwf_complex * signal) { /* * Generate two sine waves of different frequencies and * amplitudes. */ int i; for (i = 0; i < NUM_POINTS; ++i) { theta = (float)i / (float)NUM_POINTS *M_PI; signal[i][REAL] = 1.0 * cos(4.0 * theta) + 0.5 * cos(8.0 * theta); signal[i][IMAG] = 1.0 * sin(2.0 * theta) + 0.5 * sin(16.0 * theta); signal[i][IMAG] = 1.0 * cos(2.0 * theta) + 0.5 * cos(16.0 * theta); //signal[i][REAL] = i; //signal[i][IMAG] = 0; } } void do_something_with(fftwf_complex * result) { int i; for (i = 0; i < NUM_POINTS; ++i) { float mag = sqrt(result[i][REAL] * result[i][REAL] + result[i][IMAG] * result[i][IMAG]); printf("%23.12f %10.5f %10.5f\n", mag, result[i][REAL], result[i][IMAG]); } } /* Resume reading here */ int main() { fftwf_complex signal[NUM_POINTS]; fftwf_complex result[NUM_POINTS]; fftwf_plan plan = fftwf_plan_dft_1d(NUM_POINTS, signal, result, FFTW_FORWARD, FFTW_ESTIMATE); acquire_from_somewhere(signal); fftwf_execute(plan); do_something_with(result); fftwf_destroy_plan(plan); return 0; }
/* Start reading here */ #include <fftw3.h> #define NUM_POINTS 64 /* Never mind this bit */ #include <stdio.h> #include <math.h> #define REAL 0 #define IMAG 1 float theta; void acquire_from_somewhere(fftwf_complex * signal) { /* * Generate two sine waves of different frequencies and * amplitudes. */ int i; #pragma omp for private(theta) for (i = 0; i < NUM_POINTS; ++i) { theta = (float)i / (float)NUM_POINTS *M_PI; signal[i][REAL] = 1.0 * cos(4.0 * theta) + 0.5 * cos(8.0 * theta); signal[i][IMAG] = 1.0 * sin(2.0 * theta) + 0.5 * sin(16.0 * theta); signal[i][IMAG] = 1.0 * cos(2.0 * theta) + 0.5 * cos(16.0 * theta); //signal[i][REAL] = i; //signal[i][IMAG] = 0; } } void do_something_with(fftwf_complex * result) { int i; for (i = 0; i < NUM_POINTS; ++i) { float mag = sqrt(result[i][REAL] * result[i][REAL] + result[i][IMAG] * result[i][IMAG]); printf("%23.12f %10.5f %10.5f\n", mag, result[i][REAL], result[i][IMAG]); } } /* Resume reading here */ int main() { fftwf_complex signal[NUM_POINTS]; fftwf_complex result[NUM_POINTS]; fftwf_plan plan = fftwf_plan_dft_1d(NUM_POINTS, signal, result, FFTW_FORWARD, FFTW_ESTIMATE); acquire_from_somewhere(signal); fftwf_execute(plan); do_something_with(result); fftwf_destroy_plan(plan); return 0; }
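In all three variants of acquire_from_somewhere(), the `#pragma omp for private(theta)` is an orphaned worksharing construct: with no enclosing parallel region it binds to a team of one and executes sequentially, and theta is a file-scope variable that would be shared without the clause. The sketch below shows the conventional form, with a combined parallel for and a loop-local theta; the complex2 typedef stands in for fftwf_complex so the sketch builds without FFTW.

/* Conventional parallel form of the signal-generation loop: the combined
 * construct creates the team and shares the loop, and declaring theta
 * inside the loop body makes it private by scoping rather than by clause. */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#define NUM_POINTS 64
#define REAL 0
#define IMAG 1

typedef float complex2[2]; /* stand-in for fftwf_complex */

static void fill_signal(complex2 *signal)
{
    int i;
#pragma omp parallel for
    for (i = 0; i < NUM_POINTS; ++i) {
        float theta = (float)i / (float)NUM_POINTS * (float)M_PI; /* private by scoping */
        signal[i][REAL] = 1.0f * cosf(4.0f * theta) + 0.5f * cosf(8.0f * theta);
        signal[i][IMAG] = 1.0f * cosf(2.0f * theta) + 0.5f * cosf(16.0f * theta);
    }
}

int main(void)
{
    complex2 signal[NUM_POINTS];
    fill_signal(signal);
    printf("%f %f\n", signal[1][REAL], signal[1][IMAG]);
    return 0;
}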
ast-dump-openmp-begin-declare-variant_2.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics #pragma omp begin declare variant match(device={kind(cpu)}) int also_before(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(100):llvm)}) int also_after(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(0):llvm)}) int also_before(void) { return 1; } #pragma omp end declare variant int also_after(void) { return 2; } int test(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we do see the ast nodes for the cpu kind // - we do see the ast nodes for the llvm vendor // - we pick the right callees // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})' // CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)} // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(100): 
llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:16, line:29:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int' // CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:6:1, line:28:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:6:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_2]] <col:1> 'int ({{.*}})' Function [[ADDR_3]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 0; } int also_after(void) { return 0; } int also_before(void) { return 1; } int also_after(void) { return 2; } int test(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we do see the ast nodes for the cpu kind // - we do see the ast nodes for the llvm vendor // - we pick the right callees // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})' // CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)} // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] 
<col:16, line:29:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int' // CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:6:1, line:28:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:6:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_2]] <col:1> 'int ({{.*}})' Function [[ADDR_3]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics #pragma omp begin declare variant match(device={kind(cpu)}) int also_before(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(100):llvm)}) int also_after(void) { return 0; } #pragma omp end declare variant #pragma omp begin declare variant match(implementation={vendor(score(0):llvm)}) int also_before(void) { return 1; } #pragma omp end declare variant int also_after(void) { return 2; } int test(void) { // Should return 0. return also_after() + also_before(); } // Make sure: // - we do see the ast nodes for the cpu kind // - we do see the ast nodes for the llvm vendor // - we pick the right callees // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, col:21> col:5 implicit used also_before 'int ({{.*}})' // CHECK-NEXT: | |-OMPDeclareVariantAttr [[ADDR_1:0x[a-z0-9]*]] <<invalid sloc>> Implicit device={kind(cpu)} // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_2:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_3:0x[a-z0-9]*]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(0): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:17:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_3]] <line:6:1, line:8:1> line:6:1 also_before[device={kind(cpu)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_7:0x[a-z0-9]*]] <col:23, line:8:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_8:0x[a-z0-9]*]] <line:7:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_9:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_10:0x[a-z0-9]*]] <line:12:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_11:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(score(100): llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_13:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_13]] <col:1, line:14:1> line:12:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:22, line:14:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:13:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:17:1, line:19:1> line:17:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_17:0x[a-z0-9]*]] <col:23, line:19:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_18:0x[a-z0-9]*]] <line:18:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_19:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_20:0x[a-z0-9]*]] prev [[ADDR_10]] <line:22:1, line:24:1> line:22:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_21:0x[a-z0-9]*]] <col:22, line:24:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_22:0x[a-z0-9]*]] <line:23:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_23:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_24:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(score(100): 
llvm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <line:12:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_25:0x[a-z0-9]*]] <line:26:1, line:29:1> line:26:5 test 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_26:0x[a-z0-9]*]] <col:16, line:29:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_27:0x[a-z0-9]*]] <line:28:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_28:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-PseudoObjectExpr [[ADDR_29:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | |-CallExpr [[ADDR_30:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | | `-ImplicitCastExpr [[ADDR_31:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | | `-DeclRefExpr [[ADDR_32:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_20]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: | `-CallExpr [[ADDR_33:0x[a-z0-9]*]] <line:12:1, line:28:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <line:12:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_12]] <col:1> 'int ({{.*}})' Function [[ADDR_13]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})' // CHECK-NEXT: `-PseudoObjectExpr [[ADDR_35:0x[a-z0-9]*]] <line:28:25, col:37> 'int' // CHECK-NEXT: |-CallExpr [[ADDR_36:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_37:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_39:0x[a-z0-9]*]] <line:6:1, line:28:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_40:0x[a-z0-9]*]] <line:6:1> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_2]] <col:1> 'int ({{.*}})' Function [[ADDR_3]] 'also_before[device={kind(cpu)}]' 'int ({{.*}})'
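For readers unfamiliar with the mechanism this test exercises, here is a minimal, hedged sketch of OpenMP 5.x variant selection: when a context selector matches (a CPU host compiled with -fopenmp, in this case), calls to the base function are resolved to the variant; without OpenMP the base body runs. The function names are invented for the illustration.

/* Minimal declare-variant example. Compiled with an OpenMP 5.x compiler
 * (e.g. clang -fopenmp) on a CPU host this prints 1, because the device
 * selector matches and cpu_compute() substitutes for compute(); compiled
 * without OpenMP it prints 0. */
#include <stdio.h>

int cpu_compute(void) { return 1; }

#pragma omp declare variant(cpu_compute) match(device = {kind(cpu)})
int compute(void) { return 0; }

int main(void)
{
    printf("%d\n", compute());
    return 0;
}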
shared_private_default.c
// OpenMP Shared/Private/Default Example #include <omp.h> #include <stdio.h> #include <stdlib.h> int main( int argc, char* argv[ ] ) { int id = 0; int i = 0; int m = 0; int x = 2; #pragma omp parallel private( id, i ) shared( m ) \ default( shared ) { id = omp_get_thread_num( ); if( id == 0 ) { i = 3; m = 17; x++; } printf( "Thread %d: %d %d %d\n", id, i, m, x ); } return 0; } // End shared_private_default.c - EWG SDG
// OpenMP Shared / Private / Default Example #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int id = 0; int i = 0; int m = 0; int x = 2; { id = omp_get_thread_num(); if (id == 0) { i = 3; m = 17; x++; } printf("Thread %d: %d %d %d\n", id, i, m, x); } return 0; } //End shared_private_default.c - EWG SDG
// OpenMP Shared / Private / Default Example #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int id = 0; int i = 0; int m = 0; int x = 2; #pragma omp parallel private( id, i ) shared( m ) \ default( shared ) { id = omp_get_thread_num(); if (id == 0) { i = 3; m = 17; x++; } printf("Thread %d: %d %d %d\n", id, i, m, x); } return 0; } //End shared_private_default.c - EWG SDG
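A note on the three versions above: every thread other than 0 prints its private copy of i uninitialized, and the unsynchronized reads of the shared m and x race with thread 0's writes, so the printed values are indeterminate. A hedged sketch of one way to make the output well-defined (firstprivate initializes each private copy, and the barrier, which implies a flush, orders the reads after thread 0's writes):

#include <omp.h>
#include <stdio.h>

int main(void) {
  int i = 0;
  int m = 0;
  int x = 2;
  #pragma omp parallel default(none) firstprivate(i) shared(m, x)
  {
    int id = omp_get_thread_num();
    if (id == 0) {
      i = 3;  /* private: visible only to thread 0 */
      m = 17; /* shared: visible to all threads after the barrier */
      x++;
    }
    #pragma omp barrier
    printf("Thread %d: %d %d %d\n", id, i, m, x);
  }
  return 0;
}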
bli_axpyv_bgq_int.c
/* BLIS An object-based framework for developing high-performance BLAS-like libraries. Copyright (C) 2014, The University of Texas at Austin Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name(s) of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "blis.h" void bli_daxpyv_bgq_int ( conj_t conjx, dim_t n, double* restrict alpha, double* restrict x, inc_t incx, double* restrict y, inc_t incy, cntx_t* restrict cntx ) { if ( bli_zero_dim1( n ) ) return; // If there is anything that would interfere with our use of aligned // vector loads/stores, call the reference implementation. bool_t use_ref = FALSE; if ( incx != 1 || incy != 1 || bli_is_unaligned_to( ( siz_t )x, 32 ) || bli_is_unaligned_to( ( siz_t )y, 32 ) ) { use_ref = TRUE; } // Call the reference implementation if needed. if ( use_ref == TRUE ) { BLIS_DAXPYV_KERNEL_REF( conjx, n, alpha, x, incx, y, incy, cntx ); return; } dim_t n_run = n / 4; dim_t n_left = n % 4; vector4double xv, yv, zv; vector4double alphav = vec_lds( 0 * sizeof(double), (double*)alpha ); #pragma omp parallel for for ( dim_t i = 0; i < n_run; i++ ) { xv = vec_lda( 0 * sizeof(double), &x[i*4] ); yv = vec_lda( 0 * sizeof(double), &y[i*4] ); zv = vec_madd( alphav, xv, yv ); vec_sta( zv, 0 * sizeof(double), &y[i*4] ); } for ( dim_t i = 0; i < n_left; i++ ) { y[4*n_run + i] += *alpha * x[4*n_run + i]; } }
#include "blis.h" void bli_daxpyv_bgq_int ( conj_t conjx, dim_t n, double *restrict alpha, double *restrict x, inc_t incx, double *restrict y, inc_t incy, cntx_t * restrict cntx ) { if (bli_zero_dim1(n)) return; //If there is anything that would interfere with our use of aligned // vector loads / stores, call the reference implementation. bool_t use_ref = FALSE; if (incx != 1 || incy != 1 || bli_is_unaligned_to((siz_t) x, 32) || bli_is_unaligned_to((siz_t) y, 32)) { use_ref = TRUE; } //Call the reference implementation if needed . if (use_ref == TRUE) { BLIS_DAXPYV_KERNEL_REF(conjx, n, alpha, x, incx, y, incy, cntx); return; } dim_t n_run = n / 4; dim_t n_left = n % 4; vector4double xv, yv, zv; vector4double alphav = vec_lds(0 * sizeof(double), (double *)alpha); for (dim_t i = 0; i < n_run; i++) { xv = vec_lda(0 * sizeof(double), &x[i * 4]); yv = vec_lda(0 * sizeof(double), &y[i * 4]); zv = vec_madd(alphav, xv, yv); vec_sta(zv, 0 * sizeof(double), &y[i * 4]); } for (dim_t i = 0; i < n_left; i++) { y[4 * n_run + i] += *alpha * x[4 * n_run + i]; } }
#include "blis.h" void bli_daxpyv_bgq_int ( conj_t conjx, dim_t n, double *restrict alpha, double *restrict x, inc_t incx, double *restrict y, inc_t incy, cntx_t * restrict cntx ) { if (bli_zero_dim1(n)) return; //If there is anything that would interfere with our use of aligned // vector loads / stores, call the reference implementation. bool_t use_ref = FALSE; if (incx != 1 || incy != 1 || bli_is_unaligned_to((siz_t) x, 32) || bli_is_unaligned_to((siz_t) y, 32)) { use_ref = TRUE; } //Call the reference implementation if needed . if (use_ref == TRUE) { BLIS_DAXPYV_KERNEL_REF(conjx, n, alpha, x, incx, y, incy, cntx); return; } dim_t n_run = n / 4; dim_t n_left = n % 4; vector4double xv, yv, zv; vector4double alphav = vec_lds(0 * sizeof(double), (double *)alpha); #pragma omp parallel for for (dim_t i = 0; i < n_run; i++) { xv = vec_lda(0 * sizeof(double), &x[i * 4]); yv = vec_lda(0 * sizeof(double), &y[i * 4]); zv = vec_madd(alphav, xv, yv); vec_sta(zv, 0 * sizeof(double), &y[i * 4]); } for (dim_t i = 0; i < n_left; i++) { y[4 * n_run + i] += *alpha * x[4 * n_run + i]; } }
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. 
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ?
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if 
(complete_mask == (Image *) NULL) return(MagickFalse); complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=(MagickRealType) background; (void) SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(PSDInfo *psd_info,Image *image, const unsigned char *blocks,size_t length) { const unsigned char *p; ssize_t offset; StringInfo *profile; unsigned char name_length; unsigned int count; 
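/* Editor's note (not in the upstream source): each 8BIM image-resource
   block parsed below is laid out as a 4-byte "8BIM" signature, a 2-byte
   resource id, a Pascal-style name padded to an even length, a 4-byte
   big-endian data length, and the data itself padded to an even offset,
   which is what the signature compare, the id/name_length/count reads,
   and the trailing (offset & 0x01) adjustment implement. */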
unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); offset=(ssize_t) count; if (((p+offset) < blocks) || ((p+offset) > (blocks+length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* Resolution info. */ if (offset < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatImageProperty(image,"tiff:XResolution","%*g", GetMagickPrecision(),image->resolution.x); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatImageProperty(image,"tiff:YResolution","%*g", GetMagickPrecision(),image->resolution.y); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p+4) == 0)) psd_info->has_merged_image=MagickFalse; p+=offset; break; } default: { p+=offset; break; } } if ((offset & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline ssize_t ReadPSDString(Image *image,char *p,const size_t length) { ssize_t count; count=ReadBlob(image,length,(unsigned char *) p); if ((count == (ssize_t) length) && (image->endian != 
MSBEndian)) { char *q; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return(count); } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; Quantum index; index=pixel; if (packet_size == 1) index=(Quantum) ScaleQuantumToChar(index); index=(Quantum) ConstrainColormapIndex(image,(ssize_t) index, exception); if (type == 0) SetPixelIndex(image,index,q); if ((type == 0) && (channels > 1)) return; color=image->colormap+(ssize_t) GetPixelIndex(image,q); if (type != 0) color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const ssize_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType) (QuantumRange*nibble)); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=(ssize_t) image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); (void) memset(pixels,0,row_size*sizeof(*pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if(sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+2048)) /* arbitrary number */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { register unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { *(p+1)+=*p; p++; } p++; remaining-=row_size; } } static void Unpredict16Bit(const Image *image,unsigned char *pixels, const size_t count,const size_t row_size) { register unsigned char *p; size_t length, remaining; p=pixels; remaining=count; while (remaining > 0) { length=image->columns; while (--length) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; p+=2; } p+=2; remaining-=row_size; } } static void Unpredict32Bit(const Image *image,unsigned char *pixels, unsigned char *output_pixels,const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1=image->columns; offset2=2*offset1; offset3=3*offset1; p=pixels; q=output_pixels; for (y=0; y < (ssize_t) image->rows; y++) { start=p; remaining=row_size; while (--remaining) { *(p+1)+=*p; p++; } p=start; remaining=image->columns; while (remaining--) { *(q++)=*p; *(q++)=*(p+offset1); *(q++)=*(p+offset2); *(q++)=*(p+offset3); p++; } p=start+row_size; } } static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) 
RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image,pixels,count,row_size); else if (packet_size == 2) Unpredict16Bit(image,pixels,count,row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels=(unsigned char *) AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } Unpredict32Bit(image,pixels,output_pixels,row_size); pixels=(unsigned char *) RelinquishMagickMemory(pixels); pixels=output_pixels; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. 
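(Editor's note, not in the upstream comment: per the PSD format, mask flag
bit 0x01 marks the mask position as layer-relative and bit 0x02 marks the
mask as disabled, which is why flag values above 2 are rejected here and a
disabled mask is kept only when psd:preserve-opacity-mask is set.)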
*/ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void) SeekBlob(image,(MagickOffsetType) layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { (void) ResetImagePixels(mask,exception); (void) SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, (ssize_t) layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, (ssize_t) layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, (ssize_t) layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. 
*/ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info, (size_t) j,compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < (ssize_t) layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return(MagickFalse); if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static void AttachPSDLayers(Image *image,LayerInfo *layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers == 0) { layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo *psd_info, const ImageInfo *image_info,const size_t index) { if (psd_info->has_merged_image == MagickFalse) return(MagickFalse); if (image_info->number_scenes == 0) return(MagickFalse); if (index < image_info->scene) return(MagickTrue); if (index > image_info->scene+image_info->number_scenes-1) return(MagickTrue); return(MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo *psd_info,Image *image) { /* The number of layers cannot be used to determine if the merged image contains an alpha channel. So we enable it when we think we should. 
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait=BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo *layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p=GetStringInfoDatum(layer_info->info); remaining_length=GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key,"luni",sizeof(key)) == 0) { unsigned char *name; unsigned int length; length=(unsigned int) (*p++) << 24; length|=(unsigned int) (*p++) << 16; length|=(unsigned int) (*p++) << 8; length|=(unsigned int) (*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name=layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++=*p++; length--; } if (length == 0) *name='\0'; break; } else p+=size; remaining_length-=(size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo *psd_info,Image *image) { char type[4]; MagickSizeType size; ssize_t count; size=GetPSDSize(psd_info,image); if (size != 0) return(size); (void) ReadBlobLong(image); count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Mt16",4) == 0) || (LocaleNCompare(type,"Mt32",4) == 0) || (LocaleNCompare(type,"Mtrn",4) == 0))) { size=GetPSDSize(psd_info,image); if (size != 0) return(0); image->alpha_trait=BlendPixelTrait; count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(0); count=ReadPSDString(image,type,4); } if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); return(size); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, index, j, number_layers; size=GetLayerInfoSize(psd_info,image); if (size == 0) { CheckMergedImageAlpha(psd_info,image); return(MagickTrue); } layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadPSDString(image,layer_info[i].blendkey,4); if (count != 4) { 
layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); ParseAdditionalInfo(&layer_info[i]); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image,layer_info,number_layers); return(MagickTrue); } status=MagickTrue; index=0; for (i=0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info,++index) != MagickFalse)) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image,layer_info,number_layers); else layer_info=DestroyLayerInfo(layer_info,number_layers); return(status); } ModuleExport MagickBooleanType ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); 
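/* Editor's note (not in the upstream source): when the security policy
   denies coder rights for PSD, ReadPSDLayers returns MagickTrue without
   reading any layer data, so callers fall back to the merged image
   instead of failing outright. */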
return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return(MagickTrue); compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t image_list_length; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
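The fixed 26-byte header layout is: a 4-byte signature '8BPS', a 2-byte version (1 for PSD, 2 for PSB), 6 reserved bytes, a 2-byte channel count, 4-byte rows and columns, a 2-byte depth (1, 8, 16 or 32) and a 2-byte color mode; each field is validated below.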
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize),exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace,exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels=1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 32 bits per pixel; the colormap is ignored. 
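The reader just seeks past these bytes and renders the image with the grayscale colormap set up above.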
*/ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum( (unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(&psd_info,image,blocks,(size_t) length); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
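Whether a usable composite is present is tracked in psd_info.has_merged_image (image resource 0x0421 can clear it); when it is absent and only a single image was produced, the code below seeks back and parses the layer block instead.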
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); image_list_length=GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image=(MagickBooleanType) ReadPSDMergedImage( image_info,image,&psd_info,exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } image_list_length=GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image *merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=(MagickRealType) TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); if (merged == (Image *) NULL) { (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; i=0; next=image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info,image_info,i++) == MagickFalse) (void) SetImageProfile(next,GetStringInfoName(profile),profile, exception); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. % % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset=TellBlob(image); (void) SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info,image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
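A literal run is written as a count byte holding count-1 followed by the raw bytes, so 0x10 0x20 0x30 becomes 0x02 0x10 0x20 0x30; the packed-run branch above instead writes 257-count and one byte, e.g. three 0xFF bytes shrink to 0xFE 0xFF.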
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=(size_t) WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=(size_t) WriteBlobShort(image,ZipWithoutPrediction); #endif else length=(size_t) WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 
2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, (ssize_t) channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != 
UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,(const unsigned short) channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if 
(id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static 
MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - visible, etc. 
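In this flags byte bit 0x02 marks the layer as hidden; the read path derives visible from !(flags & 0x02).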
*/ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! */ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. 
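The section is always 768 bytes: 256 red values, then 256 green, then 256 blue, with entries beyond image->colors zero-padded.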
*/ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
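/*
 * Illustrative sketch, not part of the coder: a minimal PackBits decoder for
 * the 8-bit case, mirroring the run/literal convention produced by
 * PSDPackbitsEncodeImage() above (header > 128: repeat the next byte
 * 257-header times; header < 128: copy header+1 bytes; header == 128:
 * no-op/end-of-data).  The name UnpackBits8 and its signature are
 * hypothetical.
 */
#include <stddef.h>

static size_t UnpackBits8(const unsigned char *src,size_t src_length,
  unsigned char *dst,size_t dst_length)
{
  size_t
    i,
    j,
    o;

  i=0;
  o=0;
  while (i < src_length)
  {
    unsigned char
      header;

    header=src[i++];
    if (header == 128)
      continue;  /* skip no-op / end-of-data marker */
    if (header > 128)
      {
        size_t
          count;

        count=257-(size_t) header;  /* replicated run */
        if ((i >= src_length) || ((o+count) > dst_length))
          break;
        for (j=0; j < count; j++)
          dst[o++]=src[i];
        i++;
      }
    else
      {
        size_t
          count;

        count=(size_t) header+1;  /* literal run */
        if (((i+count) > src_length) || ((o+count) > dst_length))
          break;
        for (j=0; j < count; j++)
          dst[o++]=src[i++];
      }
  }
  return(o);  /* number of bytes decoded */
}
/*
 * For example, the stream 0xFE 0xFF 0x02 0x10 0x20 0x30 decodes to the six
 * bytes FF FF FF 10 20 30.
 */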
/* * Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* * Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* * Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* * Typedef declaractions. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image * image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image * image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo * info; } LayerInfo; /* * Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *, Image *, ExceptionInfo *); /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % I s P S D * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % IsPSD()() returns MagickTrue if the image format type, * identified by the % magick string, is PSD. % % The format of the IsPSD * method is: % % MagickBooleanType IsPSD(const unsigned char * *magick,const size_t length) % % A description of each parameter follows: * % % o magick: compare image format pattern against these bytes. % % * o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick, const size_t length) { if (length < 4) return (MagickFalse); if (LocaleNCompare((const char *)magick, "8BPS", 4) == 0) return (MagickTrue); return (MagickFalse); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e a d P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns * it. 
It % allocates the memory necessary for the new Image structure and * returns a % pointer to the new image. % % The format of the ReadPSDImage * method is: % % Image *ReadPSDImage(image_info,ExceptionInfo * *exception) % % A description of each parameter follows: % % o * image_info: the image info. % % o exception: return any errors or * warnings in this structure. % */ static const char * CompositeOperatorToPSDBlendMode(Image * image) { switch (image->compose) { case ColorBurnCompositeOp: return (image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return (image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return (image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return (image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return (image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return (image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return (image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return (image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return (image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return (image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return (image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return (image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return (image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return (image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return (image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return (image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return (image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return (image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return (image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return (image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return (image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return (image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return (image->endian == LSBEndian ? "mron" : "norm"); } } /* * For some reason Photoshop seems to blend semi-transparent pixels with * white. This method reverts the blending. This can be disabled by setting * the option 'psd:alpha-unblend' to off. 
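* Photoshop stores observed = alpha*original + (1-alpha)*QuantumRange, so * the loop below recovers original = (observed - (1-alpha)*QuantumRange)/alpha * for every non-alpha channel whenever 0 < alpha < 1.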
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return (MagickTrue); option = GetImageOption(image_info, "psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return (MagickTrue); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma = QuantumScale * GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); if (channel != AlphaPixelChannel) q[i] = ClampToQuantum((q[i] - ((1.0 - gamma) * QuantumRange)) / gamma); } } q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image * image, Quantum opacity, MagickBooleanType revert, ExceptionInfo * exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying layer opacity %.20g", (double)opacity); if (opacity == OpaqueAlpha) return (MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image, (Quantum) (QuantumScale * (GetPixelAlpha(image, q)) * opacity), q); else if (opacity > 0) SetPixelAlpha(image, (Quantum) (QuantumRange * (GetPixelAlpha(image, q) / (MagickRealType) opacity)), q); q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static MagickBooleanType ApplyPSDOpacityMask(Image * image, const Image * mask, Quantum background, MagickBooleanType revert, ExceptionInfo * exception) { Image * complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying opacity mask"); complete_mask = CloneImage(image, 0, 0, MagickTrue, exception); if (complete_mask == (Image *) NULL) return (MagickFalse); complete_mask->alpha_trait = BlendPixelTrait; GetPixelInfo(complete_mask, &color); color.red = (MagickRealType) background; (void)SetImageColor(complete_mask, &color, exception); status = CompositeImage(complete_mask, mask, OverCompositeOp, MagickTrue, mask->page.x - image->page.x, mask->page.y - image->page.y, exception); if (status == MagickFalse) { 
complete_mask = DestroyImage(complete_mask); return (status); } for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register Quantum * p; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); p = GetAuthenticPixels(complete_mask, 0, y, complete_mask->columns, 1, exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha = (MagickRealType) GetPixelAlpha(image, q); intensity = GetPixelIntensity(complete_mask, p); if (revert == MagickFalse) SetPixelAlpha(image, ClampToQuantum(intensity * (QuantumScale * alpha)), q); else if (intensity > 0) SetPixelAlpha(image, ClampToQuantum((alpha / intensity) * QuantumRange), q); q += GetPixelChannels(image); p += GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } complete_mask = DestroyImage(complete_mask); return (status); } static void PreservePSDOpacityMask(Image * image, LayerInfo * layer_info, ExceptionInfo * exception) { char *key; RandomInfo * random_info; StringInfo * key_info; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " preserving opacity mask"); random_info = AcquireRandomInfo(); key_info = GetRandomKey(random_info, 2 + 1); key = (char *)GetStringInfoDatum(key_info); key[8] = (char)layer_info->mask.background; key[9] = '\0'; layer_info->mask.image->page.x += layer_info->page.x; layer_info->mask.image->page.y += layer_info->page.y; (void)SetImageRegistry(ImageRegistryType, (const char *)key, layer_info->mask.image, exception); (void)SetImageArtifact(layer_info->image, "psd:opacity-mask", (const char *)key); key_info = DestroyStringInfo(key_info); random_info = DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels, const ssize_t depth, const size_t number_pixels, unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets = (ssize_t) number_compact_pixels; for (i = 0; (packets > 1) && (i < (ssize_t) number_pixels);) { packets--; length = (size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length = 256 - length + 1; CheckNumberCompactPixels; pixel = (*compact_pixels++); for (j = 0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (pixel >> 7) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 6) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 5) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 4) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 3) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 2) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 1) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (unsigned char)((pixel >> 6) & 0x03); *pixels++ = (unsigned char)((pixel >> 4) & 0x03); *pixels++ = (unsigned char)((pixel >> 2) & 0x03); *pixels++ = (unsigned char)((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++ = (unsigned char)((pixel >> 4) & 0xff); *pixels++ = (unsigned char)((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++ = (unsigned char)pixel; break; } } } continue; } length++; for (j = 0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (*compact_pixels >> 6) & 0x03; *pixels++ = (*compact_pixels >> 4) & 0x03; *pixels++ = (*compact_pixels >> 2) & 0x03; *pixels++ = (*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++ = (*compact_pixels >> 4) & 0xff; *pixels++ = (*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++ = (*compact_pixels); break; } } compact_pixels++; } } return (i); } static inline LayerInfo * DestroyLayerInfo(LayerInfo * layer_info, const ssize_t number_layers) { ssize_t i; for (i = 0; i < number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image = DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image = DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image * image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return (2); } if (image->depth > 16) return (4); if (image->depth > 8) return (2); return (1); } static inline MagickSizeType GetPSDSize(const PSDInfo * psd_info, Image * image) { if (psd_info->version == 1) return ((MagickSizeType) ReadBlobLong(image)); return ((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image * image) { if (image->depth == 1) return (((image->columns + 7) / 8) * GetPSDPacketSize(image)); else return (image->columns * GetPSDPacketSize(image)); } static const char * ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image * image, ExceptionInfo * exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask = SetImageChannelMask(image, (ChannelType) (AllChannels & ~ AlphaChannel)); status = NegateImage(image, MagickFalse, exception); (void)SetImageChannelMask(image, channel_mask); return (status); } static StringInfo * ParseImageResourceBlocks(PSDInfo * psd_info, Image * image, const unsigned char *blocks, size_t length) { const unsigned 
char *p; ssize_t offset; StringInfo * profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return ((StringInfo *) NULL); profile = BlobToStringInfo((const unsigned char *)NULL, length); SetStringInfoDatum(profile, blocks); SetStringInfoName(profile, "8bim"); for (p = blocks; (p >= blocks) && (p < (blocks + length - 7));) { if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p += 4; p = PushShortPixel(MSBEndian, p, &id); p = PushCharPixel(p, &name_length); if ((name_length % 2) == 0) name_length++; p += name_length; if (p > (blocks + length - 4)) break; p = PushLongPixel(MSBEndian, p, &count); offset = (ssize_t) count; if (((p + offset) < blocks) || ((p + offset) > (blocks + length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* * Resolution info. */ if (offset < 16) break; p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.x = (double)resolution; (void)FormatImageProperty(image, "tiff:XResolution", "%*g", GetMagickPrecision(), image->resolution.x); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.y = (double)resolution; (void)FormatImageProperty(image, "tiff:YResolution", "%*g", GetMagickPrecision(), image->resolution.y); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); image->units = PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p + 4) == 0)) psd_info->has_merged_image = MagickFalse; p += offset; break; } default: { p += offset; break; } } if ((offset & 0x01) != 0) p++; } return (profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *)NULL) return (OverCompositeOp); if (LocaleNCompare(mode, "norm", 4) == 0) return (OverCompositeOp); if (LocaleNCompare(mode, "mul ", 4) == 0) return (MultiplyCompositeOp); if (LocaleNCompare(mode, "diss", 4) == 0) return (DissolveCompositeOp); if (LocaleNCompare(mode, "diff", 4) == 0) return (DifferenceCompositeOp); if (LocaleNCompare(mode, "dark", 4) == 0) return (DarkenCompositeOp); if (LocaleNCompare(mode, "lite", 4) == 0) return (LightenCompositeOp); if (LocaleNCompare(mode, "hue ", 4) == 0) return (HueCompositeOp); if (LocaleNCompare(mode, "sat ", 4) == 0) return (SaturateCompositeOp); if (LocaleNCompare(mode, "colr", 4) == 0) return (ColorizeCompositeOp); if (LocaleNCompare(mode, "lum ", 4) == 0) return (LuminizeCompositeOp); if (LocaleNCompare(mode, "scrn", 4) == 0) return (ScreenCompositeOp); if (LocaleNCompare(mode, "over", 4) == 0) return (OverlayCompositeOp); if (LocaleNCompare(mode, "hLit", 4) == 0) return (HardLightCompositeOp); if (LocaleNCompare(mode, "sLit", 4) == 0) return (SoftLightCompositeOp); if (LocaleNCompare(mode, "smud", 4) == 0) return (ExclusionCompositeOp); if (LocaleNCompare(mode, "div ", 4) == 0) return (ColorDodgeCompositeOp); if (LocaleNCompare(mode, "idiv", 4) == 0) return (ColorBurnCompositeOp); if (LocaleNCompare(mode, "lbrn", 4) == 0) return (LinearBurnCompositeOp); if (LocaleNCompare(mode, "lddg", 4) == 0) return (LinearDodgeCompositeOp); if (LocaleNCompare(mode, "lLit", 4) == 0) return (LinearLightCompositeOp); if (LocaleNCompare(mode, "vLit", 4) == 0) return (VividLightCompositeOp); if (LocaleNCompare(mode, "pLit", 4) == 0) return (PinLightCompositeOp); if (LocaleNCompare(mode, "hMix", 4) == 0) 
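/* the blend keys are the 4-character mode signatures from the PSD layer record; anything unrecognized falls back to the "norm" behavior below */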
return (HardMixCompositeOp); return (OverCompositeOp); } static inline ssize_t ReadPSDString(Image * image, char *p, const size_t length) { ssize_t count; count = ReadBlob(image, length, (unsigned char *)p); if ((count == (ssize_t) length) && (image->endian != MSBEndian)) { char *q; q = p + length; for (--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return (count); } static inline void SetPSDPixel(Image * image, const size_t channels, const ssize_t type, const size_t packet_size, const Quantum pixel, Quantum * q, ExceptionInfo * exception) { if (image->storage_class == PseudoClass) { PixelInfo * color; Quantum index; index = pixel; if (packet_size == 1) index = (Quantum) ScaleQuantumToChar(index); index = (Quantum) ConstrainColormapIndex(image, (ssize_t) index, exception); if (type == 0) SetPixelIndex(image, index, q); if ((type == 0) && (channels > 1)) return; color = image->colormap + (ssize_t) GetPixelIndex(image, q); if (type != 0) color->alpha = (MagickRealType) pixel; SetPixelViaPixelInfo(image, color, q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel, q); break; } case -2: case 0: { SetPixelRed(image, pixel, q); break; } case -3: case 1: { SetPixelGreen(image, pixel, q); break; } case -4: case 2: { SetPixelBlue(image, pixel, q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image, pixel, q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image * image, const size_t channels, const ssize_t row, const ssize_t type, const unsigned char *pixels, ExceptionInfo * exception) { Quantum pixel; register const unsigned char *p; register Quantum * q; register ssize_t x; size_t packet_size; p = pixels; q = GetAuthenticPixels(image, 0, row, image->columns, 1, exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size = GetPSDPacketSize(image); for (x = 0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel = ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p = PushShortPixel(MSBEndian, p, &nibble); pixel = ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p = PushFloatPixel(MSBEndian, p, &nibble); pixel = ClampToQuantum((MagickRealType) (QuantumRange * nibble)); } if (image->depth > 1) { SetPSDPixel(image, channels, type, packet_size, pixel, q, exception); q += GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits = (ssize_t) image->columns - x; if (number_bits > 8) number_bits = 8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image, channels, type, packet_size, (((unsigned char)pixel) & (0x01 << (7 - bit))) != 0 ? 
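/* in 1-bit (bitmap mode) data a set bit means black, so it maps to 0 and a clear bit to QuantumRange */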
0 : QuantumRange, q, exception); q += GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return (SyncAuthenticPixels(image, exception)); } static MagickBooleanType ReadPSDChannelRaw(Image * image, const size_t channels, const ssize_t type, ExceptionInfo * exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RAW"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); (void)memset(pixels, 0, row_size * sizeof(*pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, channels, y, type, pixels, exception); if (status == MagickFalse) break; } pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } static inline MagickOffsetType * ReadPSDRLESizes(Image * image, const PSDInfo * psd_info, const size_t size) { MagickOffsetType * sizes; ssize_t y; sizes = (MagickOffsetType *) AcquireQuantumMemory(size, sizeof(*sizes)); if (sizes != (MagickOffsetType *) NULL) { for (y = 0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y] = (MagickOffsetType) ReadBlobShort(image); else sizes[y] = (MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image * image, const PSDInfo * psd_info, const ssize_t type, MagickOffsetType * sizes, ExceptionInfo * exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RLE compressed"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); length = 0; for (y = 0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length = (size_t) sizes[y]; if (length > (row_size + 2048)) /* arbitrary number */ { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "InvalidLength", image->filename); } compact_pixels = (unsigned char *)AcquireQuantumMemory(length, sizeof(*pixels)); if (compact_pixels == (unsigned char *)NULL) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(compact_pixels, 0, length * sizeof(*compact_pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, (size_t) sizes[y], compact_pixels); if (count != (ssize_t) sizes[y]) break; count = DecodePSDPixels((size_t) sizes[y], compact_pixels, (ssize_t) (image->depth == 1 ? 
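/* for 1-bit images a deliberately out-of-range depth is passed so that DecodePSDPixels takes its byte-per-pixel default case; the bit unpacking is done in ReadPSDChannelPixels above */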
123456 : image->depth), row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, psd_info->channels, y, type, pixels, exception); if (status == MagickFalse) break; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(const Image * image, unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p = pixels; remaining = count; while (remaining > 0) { length = image->columns; while (--length) { *(p + 1) += *p; p++; } p++; remaining -= row_size; } } static void Unpredict16Bit(const Image * image, unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p = pixels; remaining = count; while (remaining > 0) { length = image->columns; while (--length) { p[2] += p[0] + ((p[1] + p[3]) >> 8); p[3] += p[1]; p += 2; } p += 2; remaining -= row_size; } } static void Unpredict32Bit(const Image * image, unsigned char *pixels, unsigned char *output_pixels, const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1 = image->columns; offset2 = 2 * offset1; offset3 = 3 * offset1; p = pixels; q = output_pixels; for (y = 0; y < (ssize_t) image->rows; y++) { start = p; remaining = row_size; while (--remaining) { *(p + 1) += *p; p++; } p = start; remaining = image->columns; while (remaining--) { *(q++) = *p; *(q++) = *(p + offset1); *(q++) = *(p + offset2); *(q++) = *(p + offset3); p++; } p = start + row_size; } } static MagickBooleanType ReadPSDChannelZip(Image * image, const size_t channels, const ssize_t type, const PSDCompressionType compression, const size_t compact_size, ExceptionInfo * exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); compact_pixels = (unsigned char *)AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); packet_size = GetPSDPacketSize(image); row_size = image->columns * packet_size; count = image->rows * row_size; pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (ReadBlob(image, compact_size, compact_pixels) != (ssize_t) compact_size) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } memset(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; stream.next_in = (Bytef *) compact_pixels; stream.avail_in = (uInt) compact_size; stream.next_out = (Bytef *) pixels; stream.avail_out = (uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret = 
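/* inflate until the full pixel plane has been produced or the zlib stream ends */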
inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void)inflateEnd(&stream); compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (MagickFalse); } if (ret == Z_STREAM_END) break; } (void)inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image, pixels, count, row_size); else if (packet_size == 2) Unpredict16Bit(image, pixels, count, row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*output_pixels)); if (output_pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } Unpredict32Bit(image, pixels, output_pixels, row_size); pixels = (unsigned char *)RelinquishMagickMemory(pixels); pixels = output_pixels; } } status = MagickTrue; p = pixels; for (y = 0; y < (ssize_t) image->rows; y++) { status = ReadPSDChannelPixels(image, channels, y, type, p, exception); if (status == MagickFalse) break; p += row_size; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #endif static MagickBooleanType ReadPSDChannel(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, const size_t channel, const PSDCompressionType compression, ExceptionInfo * exception) { Image * channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image = image; mask = (Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* * Ignore a mask that is not a user-supplied layer mask, if the mask is * disabled, or if the flags have unsupported values.
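* Flag bit 0x01 marks a mask positioned relative to its layer and bit 0x02 marks a disabled mask.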
*/ option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void)SeekBlob(image, (MagickOffsetType) layer_info->channel_info[channel].size - 2, SEEK_CUR); return (MagickTrue); } mask = CloneImage(image, layer_info->mask.page.width, layer_info->mask.page.height, MagickFalse, exception); if (mask != (Image *) NULL) { (void)ResetImagePixels(mask, exception); (void)SetImageType(mask, GrayscaleType, exception); channel_image = mask; } } offset = TellBlob(image); status = MagickFalse; switch (compression) { case Raw: status = ReadPSDChannelRaw(channel_image, psd_info->channels, (ssize_t) layer_info->channel_info[channel].type, exception); break; case RLE: { MagickOffsetType * sizes; sizes = ReadPSDRLESizes(channel_image, psd_info, channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = ReadPSDChannelRLE(channel_image, psd_info, (ssize_t) layer_info->channel_info[channel].type, sizes, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status = ReadPSDChannelZip(channel_image, layer_info->channels, (ssize_t) layer_info->channel_info[channel].type, compression, layer_info->channel_info[channel].size - 2, exception); #else (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)", image->filename); #endif break; default: (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); break; } (void)SeekBlob(image, offset + layer_info->channel_info[channel].size - 2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void)DestroyImage(mask); ThrowBinaryException(CoderError, "UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image = DestroyImage(layer_info->mask.image); layer_info->mask.image = mask; } return (status); } static MagickBooleanType ReadPSDLayer(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, ExceptionInfo * exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void)SetImageBackgroundColor(layer_info->image, exception); layer_info->image->compose = PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose = NoCompositeOp; /* * Set up some hidden attributes for folks that need them. 
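* The layer geometry and opacity are exposed as psd:layer.* artifacts and the layer name as the label property.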
*/ (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.x); (void)SetImageArtifact(layer_info->image, "psd:layer.x", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.y); (void)SetImageArtifact(layer_info->image, "psd:layer.y", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double) layer_info->opacity); (void)SetImageArtifact(layer_info->image, "psd:layer.opacity", message); (void)SetImageProperty(layer_info->image, "label", (char *)layer_info->name, exception); status = MagickTrue; for (j = 0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for channel %.20g", (double)j); compression = (PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression = ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait = BlendPixelTrait; status = ReadPSDChannel(layer_info->image, image_info, psd_info, layer_info, (size_t) j, compression, exception); if (status == MagickFalse) break; } if (status != MagickFalse) status = ApplyPSDLayerOpacity(layer_info->image, layer_info->opacity, MagickFalse, exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status = NegateCMYK(layer_info->image, exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x = layer_info->mask.page.x; layer_info->mask.image->page.y = layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose = NoCompositeOp; else status = ApplyPSDOpacityMask(layer_info->image, layer_info->mask.image, layer_info->mask.background == 0 ? 
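/* the mask background byte is either 0 or 255; it selects the default color applied outside the mask */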
0 : QuantumRange, MagickFalse, exception); option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image, layer_info, exception); layer_info->mask.image = DestroyImage(layer_info->mask.image); } return (status); } static MagickBooleanType CheckPSDChannels(const PSDInfo * psd_info, LayerInfo * layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return (MagickFalse); channel_type = RedChannel; if (psd_info->min_channels >= 3) channel_type |= (GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type |= BlackChannel; for (i = 0; i < (ssize_t) layer_info->channels; i++) { short type; type = layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return (MagickFalse); if (type == -1) { channel_type |= AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type &= ~RedChannel; else if (type == 1) channel_type &= ~GreenChannel; else if (type == 2) channel_type &= ~BlueChannel; else if (type == 3) channel_type &= ~BlackChannel; } if (channel_type == 0) return (MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return (MagickTrue); return (MagickFalse); } static void AttachPSDLayers(Image * image, LayerInfo * layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = i; j < number_layers - 1; j++) layer_info[j] = layer_info[j + 1]; number_layers--; i--; } } if (number_layers == 0) { layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i = 0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous = layer_info[i - 1].image; if (i < (number_layers - 1)) layer_info[i].image->next = layer_info[i + 1].image; layer_info[i].image->page = layer_info[i].page; } image->next = layer_info[0].image; layer_info[0].image->previous = image; layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo * psd_info, const ImageInfo * image_info, const size_t index) { if (psd_info->has_merged_image == MagickFalse) return (MagickFalse); if (image_info->number_scenes == 0) return (MagickFalse); if (index < image_info->scene) return (MagickTrue); if (index > image_info->scene + image_info->number_scenes - 1) return (MagickTrue); return (MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo * psd_info, Image * image) { /* * The number of layers cannot be used to determine if the merged image * contains an alpha channel. So we enable it when we think we should. 
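* For example, a 4-channel RGB file or a 2-channel grayscale file almost certainly carries the merged alpha in its extra channel.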
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait = BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo * layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p = GetStringInfoDatum(layer_info->info); remaining_length = GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (char)(*p++); key[1] = (char)(*p++); key[2] = (char)(*p++); key[3] = (char)(*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key, "luni", sizeof(key)) == 0) { unsigned char *name; unsigned int length; length = (unsigned int)(*p++) << 24; length |= (unsigned int)(*p++) << 16; length |= (unsigned int)(*p++) << 8; length |= (unsigned int)(*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name = layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++ = *p++; length--; } if (length == 0) *name = '\0'; break; } else p += size; remaining_length -= (size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo * psd_info, Image * image) { char type[4]; MagickSizeType size; ssize_t count; size = GetPSDSize(psd_info, image); if (size != 0) return (size); (void)ReadBlobLong(image); count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (0); count = ReadPSDString(image, type, 4); if ((count == 4) && ((LocaleNCompare(type, "Mt16", 4) == 0) || (LocaleNCompare(type, "Mt32", 4) == 0) || (LocaleNCompare(type, "Mtrn", 4) == 0))) { size = GetPSDSize(psd_info, image); if (size != 0) return (0); image->alpha_trait = BlendPixelTrait; count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (0); count = ReadPSDString(image, type, 4); } if ((count == 4) && ((LocaleNCompare(type, "Lr16", 4) == 0) || (LocaleNCompare(type, "Lr32", 4) == 0))) size = GetPSDSize(psd_info, image); return (size); } static MagickBooleanType ReadPSDLayersInternal(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, const MagickBooleanType skip_layers, ExceptionInfo * exception) { char type[4]; LayerInfo * layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, index, j, number_layers; size = GetLayerInfoSize(psd_info, image); if (size == 0) { CheckMergedImageAlpha(psd_info, image); return (MagickTrue); } layer_info = (LayerInfo *) NULL; number_layers = (ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* * The first alpha channel in the merged result contains the * transparency data for the merged result. 
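* Photoshop signals this case by storing a negative layer count. */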
*/ number_layers = MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " negative layer count corrected for"); image->alpha_trait = BlendPixelTrait; } /* * We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image contains %.20g layers", (double)number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError, "InvalidNumberOfLayers", image->filename); layer_info = (LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(layer_info, 0, (size_t) number_layers * sizeof(*layer_info)); for (i = 0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading layer #%.20g", (double)i + 1); top = (ssize_t) ReadBlobSignedLong(image); left = (ssize_t) ReadBlobSignedLong(image); bottom = (ssize_t) ReadBlobSignedLong(image); right = (ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } layer_info[i].page.y = top; layer_info[i].page.x = left; layer_info[i].page.width = (size_t) (right - left); layer_info[i].page.height = (size_t) (bottom - top); layer_info[i].channels = ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double)layer_info[i].page.x, (double)layer_info[i].page.y, (double)layer_info[i].page.height, (double) layer_info[i].page.width, (double)layer_info[i].channels); for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type = (short)ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size = (size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g", (double)j, (double)layer_info[i].channel_info[j].type, (double)layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info, &layer_info[i]) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadPSDString(image, 
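/* the 4-byte blend key ("norm", "mul ", ...) is mapped to a composite operator in ReadPSDLayer */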
layer_info[i].blendkey, 4); if (count != 4) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } layer_info[i].opacity = (Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping = (unsigned char)ReadBlobByte(image); layer_info[i].flags = (unsigned char)ReadBlobByte(image); layer_info[i].visible = !(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey, (double)layer_info[i].opacity, layer_info[i].clipping ? "true" : "false", layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void)ReadBlobByte(image); /* filler */ size = ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer contains additional info"); length = ReadBlobLong(image); combined_length = length + 4; if (length != 0) { /* * Layer mask info. */ layer_info[i].mask.page.y = (ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x = (ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height = (size_t) (ReadBlobSignedLong(image) - layer_info[i].mask.page.y); layer_info[i].mask.page.width = (size_t) ( ReadBlobSignedLong(image) - layer_info[i].mask.page.x); layer_info[i].mask.background = (unsigned char)ReadBlobByte( image); layer_info[i].mask.flags = (unsigned char)ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y = layer_info[i].mask.page.y - layer_info[i].page.y; layer_info[i].mask.page.x = layer_info[i].mask.page.x - layer_info[i].page.x; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double)layer_info[i].mask.page.x, (double) layer_info[i].mask.page.y, (double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height, (double)((MagickOffsetType) length) - 18); /* * Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image, (MagickSizeType) (length - 18)) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = ReadBlobLong(image); combined_length += length + 4; if (length != 0) { /* * Layer blending ranges info. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer blending ranges: length=%.20g", (double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } /* * Layer name. 
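* Stored as a Pascal string padded to a multiple of 4 bytes.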
*/ length = (MagickSizeType) (unsigned char)ReadBlobByte(image); combined_length += length + 1; if (length > 0) (void)ReadBlob(image, (size_t) length++, layer_info[i].name); layer_info[i].name[length] = '\0'; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer name: %s", layer_info[i].name); if ((length % 4) != 0) { length = 4 - (length % 4); combined_length += length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = (MagickSizeType) size - combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile", image->filename); } layer_info[i].info = AcquireStringInfo((const size_t)length); info = GetStringInfoDatum(layer_info[i].info); (void)ReadBlob(image, (const size_t)length, info); ParseAdditionalInfo(&layer_info[i]); } } } for (i = 0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); continue; } /* * Allocate layered image. */ layer_info[i].image = CloneImage(image, layer_info[i].page.width, layer_info[i].page.height, MagickFalse, exception); if (layer_info[i].image == (Image *) NULL) { layer_info = DestroyLayerInfo(layer_info, number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of image for layer %.20g failed", (double)i); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void)SetImageProfile(layer_info[i].image, "psd:additional-info", layer_info[i].info, exception); layer_info[i].info = DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image, layer_info, number_layers); return (MagickTrue); } status = MagickTrue; index = 0; for (i = 0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info, ++index) != MagickFalse)) { for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image, (MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } continue; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for layer %.20g", (double)i); status = ReadPSDLayer(image, image_info, psd_info, &layer_info[i], exception); if (status == MagickFalse) break; status = SetImageProgress(image, LoadImagesTag, (MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image, layer_info, number_layers); else layer_info = DestroyLayerInfo(layer_info, number_layers); return (status); } ModuleExport MagickBooleanType ReadPSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, ExceptionInfo * exception) { PolicyDomain domain; PolicyRights rights; domain = CoderPolicyDomain; rights = 
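/* honor the security policy: when coder read rights for PSD are revoked, layer parsing is skipped */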
ReadPolicyRights; if (IsRightsAuthorized(domain, rights, "PSD") == MagickFalse) return (MagickTrue); return (ReadPSDLayersInternal(image, image_info, psd_info, MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo * image_info, Image * image, const PSDInfo * psd_info, ExceptionInfo * exception) { MagickOffsetType * sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return (MagickTrue); compression = (PSDCompressionType) ReadBlobMSBShort(image); image->compression = ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); return (MagickFalse); } sizes = (MagickOffsetType *) NULL; if (compression == RLE) { sizes = ReadPSDRLESizes(image, psd_info, image->rows * psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } status = MagickTrue; for (i = 0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type = i; if ((type == 1) && (psd_info->channels == 2)) type = -1; if (compression == RLE) status = ReadPSDChannelRLE(image, psd_info, type, sizes + (i * image->rows), exception); else status = ReadPSDChannelRaw(image, psd_info->channels, type, exception); if (status != MagickFalse) status = SetImageProgress(image, LoadImagesTag, (MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status = NegateCMYK(image, exception); if (status != MagickFalse) status = CorrectPSDAlphaBlend(image_info, image, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); return (status); } static Image * ReadPSDImage(const ImageInfo * image_info, ExceptionInfo * exception) { Image * image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t image_list_length; ssize_t count; StringInfo * profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image = AcquireImage(image_info, exception); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } /* * Read image header. 
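* The 26-byte header holds the "8BPS" signature, a version (1 = PSD, 2 = PSB), 6 reserved bytes, the channel count, rows, columns, bit depth, and color mode.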
*/ image->endian = MSBEndian; count = ReadBlob(image, 4, (unsigned char *)psd_info.signature); psd_info.version = ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature, "8BPS", 4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); (void)ReadBlob(image, 6, psd_info.reserved); psd_info.channels = ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError, "MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError, "MaximumChannelsExceeded"); psd_info.rows = ReadBlobMSBLong(image); psd_info.columns = ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.depth = ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.mode = ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double)psd_info.columns, (double)psd_info.rows, (double) psd_info.channels, (double)psd_info.depth, ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); /* * Initialize image. */ image->depth = psd_info.depth; image->columns = psd_info.columns; image->rows = psd_info.rows; status = SetImageExtent(image, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImageList(image)); status = ResetImagePixels(image, exception); if (status == MagickFalse) return (DestroyImageList(image)); psd_info.min_channels = 3; if (psd_info.mode == LabMode) (void)SetImageColorspace(image, LabColorspace, exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels = 4; (void)SetImageColorspace(image, CMYKColorspace, exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status = AcquireImageColormap(image, MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize), exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels = 1; (void)SetImageColorspace(image, GRAYColorspace, exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels = 1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); /* * Read PSD raster colormap only present for indexed and duotone images. */ length = ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* * Duotone image data; the format of this data is undocumented. * 32 bits per pixel; the colormap is ignored. 
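* The block is skipped here and the image is decoded as grayscale.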
*/ (void)SeekBlob(image, (const MagickOffsetType)length, SEEK_CUR); } else { size_t number_colors; /* * Read PSD raster colormap. */ number_colors = (size_t) length / 3; if (number_colors > 65536) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (AcquireImageColormap(image, number_colors, exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].red = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].green = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].blue = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); image->alpha_trait = UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image = MagickTrue; profile = (StringInfo *) NULL; length = ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* * Image resources block. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading image resource blocks - %.20g bytes", (double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); blocks = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); count = ReadBlob(image, (size_t) length, blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *)blocks, "8BIM", 4) != 0)) { blocks = (unsigned char *)RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } profile = ParseImageResourceBlocks(&psd_info, image, blocks, (size_t) length); blocks = (unsigned char *)RelinquishMagickMemory(blocks); } /* * Layer and mask block. */ length = GetPSDSize(&psd_info, image); if (length == 8) { length = ReadBlobMSBLong(image); length = ReadBlobMSBLong(image); } offset = TellBlob(image); skip_layers = MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " read composite only"); skip_layers = MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image, image_info, &psd_info, skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } /* * Skip the rest of the layer and mask information. */ (void)SeekBlob(image, offset + length, SEEK_SET); } /* * If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); ThrowReaderException(CorruptImageError, "UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * Read the precombined layer, present for PSD < 4 compatibility. 
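* When it is absent, the layers read above are flattened to synthesize it.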
*/ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading the precombined layer"); image_list_length = GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image = (MagickBooleanType) ReadPSDMergedImage( image_info, image, &psd_info, exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void)SeekBlob(image, offset, SEEK_SET); status = ReadPSDLayersInternal(image, image_info, &psd_info, MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } image_list_length = GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image * merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } image->background_color.alpha = (MagickRealType) TransparentAlpha; image->background_color.alpha_trait = BlendPixelTrait; (void)SetImageBackgroundColor(image, exception); merged = MergeImageLayers(image, FlattenLayer, exception); if (merged == (Image *) NULL) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } ReplaceImageInList(&image, merged); } if (profile != (StringInfo *) NULL) { Image * next; i = 0; next = image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info, image_info, i++) == MagickFalse) (void)SetImageProfile(next, GetStringInfoName(profile), profile, exception); next = next->next; } profile = DestroyStringInfo(profile); } (void)CloseBlob(image); return (GetFirstImageInList(image)); }
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   R e g i s t e r P S D I m a g e                                         %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to the list
%  of supported formats.  The properties include the image format tag, a
%  method to read and/or write the format, whether the format supports the
%  saving of more than one frame to the same file or blob, whether the
%  format supports native in-memory I/O, and a brief description of the
%  format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  entry = AcquireMagickInfo("PSD", "PSB", "Adobe Large Document Format");
  entry->decoder = (DecodeImageHandler *) ReadPSDImage;
  entry->encoder = (EncodeImageHandler *) WritePSDImage;
  entry->magick = (IsImageFormatHandler *) IsPSD;
  entry->flags |= CoderDecoderSeekableStreamFlag;
  entry->flags |= CoderEncoderSeekableStreamFlag;
  (void)RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("PSD", "PSD", "Adobe Photoshop bitmap");
  entry->decoder = (DecodeImageHandler *) ReadPSDImage;
  entry->encoder = (EncodeImageHandler *) WritePSDImage;
  entry->magick = (IsImageFormatHandler *) IsPSD;
  entry->flags |= CoderDecoderSeekableStreamFlag;
  entry->flags |= CoderEncoderSeekableStreamFlag;
  (void)RegisterMagickInfo(entry);
  return (MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   U n r e g i s t e r P S D I m a g e                                     %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the PSD module
%  from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void)UnregisterMagickInfo("PSB");
  (void)UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                           %
%                                                                           %
%                                                                           %
%   W r i t e P S D I m a g e                                               %
%                                                                           %
%                                                                           %
%                                                                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image
%  format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,
%        Image *image, ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline ssize_t SetPSDOffset(const PSDInfo * psd_info, Image * image, const size_t offset) { if (psd_info->version == 1) return (WriteBlobMSBShort(image, (unsigned short)offset)); return (WriteBlobMSBLong(image, (unsigned int)offset)); } static inline ssize_t WritePSDOffset(const PSDInfo * psd_info, Image * image, const MagickSizeType size, const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset = TellBlob(image); (void)SeekBlob(image, offset, SEEK_SET); if (psd_info->version == 1) result = WriteBlobMSBShort(image, (unsigned short)size); else result = WriteBlobMSBLong(image, (unsigned int)size); (void)SeekBlob(image, current_offset, SEEK_SET); return (result); } static inline ssize_t SetPSDSize(const PSDInfo * psd_info, Image * image, const MagickSizeType size) { if (psd_info->version == 1) return (WriteBlobLong(image, (unsigned int)size)); return (WriteBlobLongLong(image, size)); } static inline ssize_t WritePSDSize(const PSDInfo * psd_info, Image * image, const MagickSizeType size, const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset = TellBlob(image); (void)SeekBlob(image, offset, SEEK_SET); result = SetPSDSize(psd_info, image, size); (void)SeekBlob(image, current_offset, SEEK_SET); return (result); } static size_t PSDPackbitsEncodeImage(Image * image, const size_t length, const unsigned char *pixels, unsigned char *compact_pixels, ExceptionInfo * exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* * Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(pixels != (unsigned char *)NULL); assert(compact_pixels != (unsigned char *)NULL); packbits = (unsigned char *)AcquireQuantumMemory(128UL, sizeof(*packbits)); if (packbits == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); q = compact_pixels; for (i = (ssize_t) length; i != 0;) { switch (i) { case 1: { i--; *q++ = (unsigned char)0; *q++ = (*pixels); break; } case 2: { i -= 2; *q++ = (unsigned char)1; *q++ = (*pixels); *q++ = pixels[1]; break; } case 3: { i -= 3; if ((*pixels == *(pixels + 1)) && (*(pixels + 1) == *(pixels + 2))) { *q++ = (unsigned char)((256 - 3) + 1); *q++ = (*pixels); break; } *q++ = (unsigned char)2; *q++ = (*pixels); *q++ = pixels[1]; *q++ = pixels[2]; break; } default: { if ((*pixels == *(pixels + 1)) && (*(pixels + 1) == *(pixels + 2))) { /* * Packed run. */ count = 3; while (((ssize_t) count < i) && (*pixels == *(pixels + count))) { count++; if (count >= 127) break; } i -= count; *q++ = (unsigned char)((256 - count) + 1); *q++ = (*pixels); pixels += count; break; } /* * Literal run. 
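* A literal run is a (count - 1) header byte followed by the raw bytes, while the packed run above used a (256 - count) + 1 header followed by a single byte; 128 marks the end of the data.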
*/ count = 0; while ((*(pixels + count) != *(pixels + count + 1)) || (*(pixels + count + 1) != *(pixels + count + 2))) { packbits[count + 1] = pixels[count]; count++; if (((ssize_t) count >= (i - 3)) || (count >= 127)) break; } i -= count; *packbits = (unsigned char)(count - 1); for (j = 0; j <= (ssize_t) count; j++) *q++ = packbits[j]; pixels += count; break; } } } *q++ = (unsigned char)128; /* EOD marker */ packbits = (unsigned char *)RelinquishMagickMemory(packbits); return ((size_t) (q - compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo * psd_info, Image * image, const Image * next_image, const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length = (size_t) WriteBlobShort(image, RLE); for (i = 0; i < channels; i++) for (y = 0; y < (ssize_t) next_image->rows; y++) length += SetPSDOffset(psd_info, image, 0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length = (size_t) WriteBlobShort(image, ZipWithoutPrediction); #endif else length = (size_t) WriteBlobShort(image, Raw); return (length); } static size_t WritePSDChannel(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset, const MagickBooleanType separate, const CompressionType compression, ExceptionInfo * exception) { MagickBooleanType monochrome; QuantumInfo * quantum_info; register const Quantum * p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels = (unsigned char *)NULL; flush = Z_NO_FLUSH; #endif count = 0; if (separate != MagickFalse) { size_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, compression, 1); } if (next_image->depth > 8) next_image->depth = 16; monochrome = IsImageMonochrome(image) && (image->depth == 1) ? 
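/* 1-bit planes are written bit-inverted below since PSD bitmap data stores 1 for black */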
MagickTrue : MagickFalse; quantum_info = AcquireQuantumInfo(image_info, next_image); if (quantum_info == (QuantumInfo *) NULL) return (0); pixels = (unsigned char *)GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels = (unsigned char *)AcquireQuantumMemory( MagickMinBufferExtent, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *)NULL) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } memset(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; level = Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level = (int)image_info->quality; if (deflateInit(&stream, level) != Z_OK) { quantum_info = DestroyQuantumInfo(quantum_info); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); return (0); } } #endif for (y = 0; y < (ssize_t) next_image->rows; y++) { p = GetVirtualPixels(next_image, 0, y, next_image->columns, 1, exception); if (p == (const Quantum *)NULL) break; length = ExportQuantumPixels(next_image, (CacheView *) NULL, quantum_info, quantum_type, pixels, exception); if (monochrome != MagickFalse) for (i = 0; i < (ssize_t) length; i++) pixels[i] = (~pixels[i]); if (compression == RLECompression) { length = PSDPackbitsEncodeImage(image, length, pixels, compact_pixels, exception); count += WriteBlob(image, length, compact_pixels); size_offset += WritePSDOffset(psd_info, image, length, size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in = (uInt) length; stream.next_in = (Bytef *) pixels; if (y == (ssize_t) next_image->rows - 1) flush = Z_FINISH; do { stream.avail_out = (uInt) MagickMinBufferExtent; stream.next_out = (Bytef *) compressed_pixels; if (deflate(&stream, flush) == Z_STREAM_ERROR) break; length = (size_t) MagickMinBufferExtent - stream.avail_out; if (length > 0) count += WriteBlob(image, length, compressed_pixels); } while (stream.avail_out == 0); } #endif else count += WriteBlob(image, length, pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void)deflateEnd(&stream); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); } #endif quantum_info = DestroyQuantumInfo(quantum_info); return (count); } static unsigned char * AcquireCompactPixels(const Image * image, ExceptionInfo * exception) { size_t packet_size; unsigned char *compact_pixels; packet_size = image->depth > 8UL ? 
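/* the compact buffer is generously oversized for the worst-case PackBits expansion */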
2UL : 1UL; compact_pixels = (unsigned char *)AcquireQuantumMemory((9 * image->columns) + 1, packet_size * sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); } return (compact_pixels); } static size_t WritePSDChannels(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, MagickOffsetType size_offset, const MagickBooleanType separate, ExceptionInfo * exception) { CompressionType compression; Image * mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count = 0; offset_length = 0; rows_offset = 0; compact_pixels = (unsigned char *)NULL; compression = next_image->compression; if (image_info->compression != UndefinedCompression) compression = image_info->compression; if (compression == RLECompression) { compact_pixels = AcquireCompactPixels(next_image, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } channels = 1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels = (size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, compression, (ssize_t) channels); offset_length = (next_image->rows * (psd_info->version == 1 ? 2 : 4)); } size_offset += 2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length = WritePSDChannel(psd_info, image_info, image, next_image, IndexQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (IsImageGray(next_image) != MagickFalse) { length = WritePSDChannel(psd_info, image_info, image, next_image, GrayQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); length = WritePSDChannel(psd_info, image_info, image, next_image, RedQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, GreenQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, BlueQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; if (next_image->colorspace == CMYKColorspace) { length = WritePSDChannel(psd_info, image_info, image, next_image, BlackQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != 
MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length = WritePSDChannel(psd_info, image_info, image, next_image, AlphaQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); if (separate != MagickFalse) { const char *property; property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels = AcquireCompactPixels(mask, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } length = WritePSDChannel(psd_info, image_info, image, mask, RedQuantum, compact_pixels, rows_offset, MagickTrue, compression, exception); (void)WritePSDSize(psd_info, image, length, size_offset); count += length; compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); } } } return (count); } static size_t WritePascalString(Image * image, const char *value, size_t padding) { size_t count, length; register ssize_t i; /* * Max length is 255. */ count = 0; length = (strlen(value) > 255UL) ? 255UL : strlen(value); if (length == 0) count += WriteBlobByte(image, 0); else { count += WriteBlobByte(image, (unsigned char)length); count += WriteBlob(image, length, (const unsigned char *)value); } length++; if ((length % padding) == 0) return (count); for (i = 0; i < (ssize_t) (padding - (length % padding)); i++) count += WriteBlobByte(image, 0); return (count); } static void WriteResolutionResourceBlock(Image * image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution = 2.54 * 65536.0 * image->resolution.x + 0.5; y_resolution = 2.54 * 65536.0 * image->resolution.y + 0.5; units = 2; } else { x_resolution = 65536.0 * image->resolution.x + 0.5; y_resolution = 65536.0 * image->resolution.y + 0.5; units = 1; } (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x03ED); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, 16); /* resource size */ (void)WriteBlobMSBLong(image, (unsigned int)(x_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* horizontal resolution unit */ (void)WriteBlobMSBShort(image, units); /* width unit */ (void)WriteBlobMSBLong(image, (unsigned int)(y_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* vertical resolution unit */ (void)WriteBlobMSBShort(image, units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo * psd_info, Image * image, const signed short channel) { size_t count; count = (size_t) WriteBlobShort(image, (const unsigned short)channel); count += SetPSDSize(psd_info, image, 0); return (count); } static void RemoveICCProfileFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < 
(datum + length - 16));) { register unsigned char *q; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if (id == 0x0000040f) { ssize_t quantum; quantum = PSDQuantum(count) + 12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q + quantum < (datum + length - 16))) (void)memmove(q, q + quantum, length - quantum - (q - datum)); SetStringInfoLength(bim_profile, length - quantum); } break; } p += count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; ssize_t cnt; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) return; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); cnt = PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length - 12)) && ((ssize_t) length - (cnt + 12) - (q - datum)) > 0) { (void)memmove(q, q + cnt + 12, length - (cnt + 12) - (q - datum)); SetStringInfoLength(bim_profile, length - (cnt + 12)); break; } p += count; if ((count & 0x01) != 0) p++; } } static const StringInfo * GetAdditionalInformation(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* * Whitelist of keys from: * https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo * info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo * profile; unsigned char *p; unsigned int size; info = GetImageProfile(image, "psd:additional-info"); if (info == (const StringInfo *)NULL) return ((const StringInfo *)NULL); option = GetImageOption(image_info, "psd:additional-info"); if (LocaleCompare(option, "all") == 0) return (info); if (LocaleCompare(option, "selective") != 0) { profile = RemoveImageProfile(image, "psd:additional-info"); return (DestroyStringInfo(profile)); } length = GetStringInfoLength(info); p = GetStringInfoDatum(info); remaining_length = length; length = 0; while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (char)(*p++); key[1] = (char)(*p++); key[2] = (char)(*p++); key[3] = (char)(*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) return ((const StringInfo *)NULL); found = MagickFalse; for (i = 0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key, allowed[i], PSDKeySize) != 0) continue; found = MagickTrue; break; } 
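/*
  Keys that fail the whitelist are squeezed out in place: the memmove below
  shifts the remaining blocks over the rejected one, so only the allowed
  additional-information blocks survive in the returned profile.
*/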
remaining_length -= (size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p = (unsigned char *)memmove(p - 12, p + size, remaining_length); continue; } length += (size_t) size + 12; p += size; } profile = RemoveImageProfile(image, "psd:additional-info"); if (length == 0) return (DestroyStringInfo(profile)); SetStringInfoLength(profile, (const size_t)length); (void)SetImageProfile(image, "psd:additional-info", info, exception); return (profile); } static MagickBooleanType WritePSDLayersInternal(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, size_t * layers_size, ExceptionInfo * exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo * info; Image * base_image, *next_image; MagickBooleanType status; MagickOffsetType * layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status = MagickTrue; base_image = GetNextImageInList(image); if (base_image == (Image *) NULL) base_image = image; size = 0; size_offset = TellBlob(image); (void)SetPSDSize(psd_info, image, 0); layer_count = 0; for (next_image = base_image; next_image != NULL;) { layer_count++; next_image = GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size += WriteBlobShort(image, -(unsigned short)layer_count); else size += WriteBlobShort(image, (unsigned short)layer_count); layer_size_offsets = (MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count, sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); layer_index = 0; for (next_image = base_image; next_image != NULL;) { Image * mask; unsigned char default_color; unsigned short channels, total_channels; mask = (Image *) NULL; property = GetImageArtifact(next_image, "psd:opacity-mask"); default_color = 0; if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); default_color = (unsigned char)(strlen(property) == 9 ? 255 : 0); } size += WriteBlobSignedLong(image, (signed int)next_image->page.y); size += WriteBlobSignedLong(image, (signed int)next_image->page.x); size += WriteBlobSignedLong(image, (signed int)(next_image->page.y + next_image->rows)); size += WriteBlobSignedLong(image, (signed int)(next_image->page.x + next_image->columns)); channels = 1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels = (unsigned short)(next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels = channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size += WriteBlobShort(image, total_channels); layer_size_offsets[layer_index++] = TellBlob(image); for (i = 0; i < (ssize_t) channels; i++) size += WriteChannelSize(psd_info, image, (signed short)i); if (next_image->alpha_trait != UndefinedPixelTrait) size += WriteChannelSize(psd_info, image, -1); if (mask != (Image *) NULL) size += WriteChannelSize(psd_info, image, -2); size += WriteBlobString(image, image->endian == LSBEndian ? 
"MIB8" : "8BIM"); size += WriteBlobString(image, CompositeOperatorToPSDBlendMode(next_image)); property = GetImageArtifact(next_image, "psd:layer.opacity"); if (property != (const char *)NULL) { Quantum opacity; opacity = (Quantum) StringToInteger(property); size += WriteBlobByte(image, ScaleQuantumToChar(opacity)); (void)ApplyPSDLayerOpacity(next_image, opacity, MagickTrue, exception); } else size += WriteBlobByte(image, 255); size += WriteBlobByte(image, 0); size += WriteBlobByte(image, (const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - * visible, etc. */ size += WriteBlobByte(image, 0); info = GetAdditionalInformation(image_info, next_image, exception); property = (const char *)GetImageProperty(next_image, "label", exception); if (property == (const char *)NULL) { (void)FormatLocaleString(layer_name, MagickPathExtent, "L%.20g", (double)layer_index); property = layer_name; } name_length = strlen(property) + 1; if ((name_length % 4) != 0) name_length += (4 - (name_length % 4)); if (info != (const StringInfo *)NULL) name_length += GetStringInfoLength(info); name_length += 8; if (mask != (Image *) NULL) name_length += 20; size += WriteBlobLong(image, (unsigned int)name_length); if (mask == (Image *) NULL) size += WriteBlobLong(image, 0); else { if (mask->compose != NoCompositeOp) (void)ApplyPSDOpacityMask(next_image, mask, ScaleCharToQuantum( default_color), MagickTrue, exception); mask->page.y += image->page.y; mask->page.x += image->page.x; size += WriteBlobLong(image, 20); size += WriteBlobSignedLong(image, (const signed int)mask->page.y); size += WriteBlobSignedLong(image, (const signed int)mask->page.x); size += WriteBlobSignedLong(image, (const signed int)(mask->rows + mask->page.y)); size += WriteBlobSignedLong(image, (const signed int)(mask->columns + mask->page.x)); size += WriteBlobByte(image, default_color); size += WriteBlobByte(image, (const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size += WriteBlobMSBShort(image, 0); } size += WriteBlobLong(image, 0); size += WritePascalString(image, property, 4); if (info != (const StringInfo *)NULL) size += WriteBlob(image, GetStringInfoLength(info), GetStringInfoDatum(info)); next_image = GetNextImageInList(next_image); } /* * Now the image data! 
*/ next_image = base_image; layer_index = 0; while (next_image != NULL) { length = WritePSDChannels(psd_info, image_info, image, next_image, layer_size_offsets[layer_index++], MagickTrue, exception); if (length == 0) { status = MagickFalse; break; } size += length; next_image = GetNextImageInList(next_image); } /* * Write the total size */ if (layers_size != (size_t *) NULL) *layers_size = size; if ((size / 2) != ((size + 1) / 2)) rounded_size = size + 1; else rounded_size = size; (void)WritePSDSize(psd_info, image, rounded_size, size_offset); layer_size_offsets = (MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* * Remove the opacity mask from the registry */ next_image = base_image; while (next_image != (Image *) NULL) { property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) (void)DeleteImageRegistry(property); next_image = GetNextImageInList(next_image); } return (status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, ExceptionInfo * exception) { PolicyDomain domain; PolicyRights rights; domain = CoderPolicyDomain; rights = WritePolicyRights; if (IsRightsAuthorized(domain, rights, "PSD") == MagickFalse) return (MagickTrue); return WritePSDLayersInternal(image, image_info, psd_info, (size_t *) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const StringInfo * icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo * bim_profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status = OpenBlob(image_info, image, WriteBinaryBlobMode, exception); if (status == MagickFalse) return (status); packet_size = (size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size += image->depth > 8 ? 2 : 1; psd_info.version = 1; if ((LocaleCompare(image_info->magick, "PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version = 2; (void)WriteBlob(image, 4, (const unsigned char *)"8BPS"); (void)WriteBlobMSBShort(image, psd_info.version); /* version */ for (i = 1; i <= 6; i++) (void)WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image, "icc") == (StringInfo *) NULL) && (SetImageGray(image, exception) != MagickFalse)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void)SetImageStorageClass(image, DirectClass, exception); if (image->colorspace != CMYKColorspace) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels = (image->alpha_trait != UndefinedPixelTrait ? 
5UL : 4UL); } (void)WriteBlobMSBShort(image, (unsigned short)num_channels); (void)WriteBlobMSBLong(image, (unsigned int)image->rows); (void)WriteBlobMSBLong(image, (unsigned int)image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* * Write depth & mode. */ monochrome = IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void)WriteBlobMSBShort(image, (unsigned short)(image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void)TransformImageColorspace(image, sRGBColorspace, exception); (void)WriteBlobMSBShort(image, (unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void)TransformImageColorspace(image, CMYKColorspace, exception); (void)WriteBlobMSBShort(image, CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void)WriteBlobMSBLong(image, 0); else { /* * Write PSD raster colormap. */ (void)WriteBlobMSBLong(image, 768); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); } /* * Image resource block. 
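Every 8BIM resource block consists of the 4-byte "8BIM" signature, a
2-byte resource ID, a Pascal-style name padded to an even length, a 4-byte
data size, and the data itself padded to an even length.  The base length
of 28 below is the fixed size of the 0x03ED resolution block emitted by
WriteResolutionResourceBlock(): 12 bytes of header plus 16 bytes of data.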
*/ length = 28; /* 0x03EB */ bim_profile = (StringInfo *) GetImageProfile(image, "8bim"); icc_profile = GetImageProfile(image, "icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile = CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length += PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *)NULL) length += PSDQuantum(GetStringInfoLength(icc_profile)) + 12; (void)WriteBlobMSBLong(image, (unsigned int)length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void)WriteBlob(image, GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile = DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x0000040F); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, (unsigned int)GetStringInfoLength( icc_profile)); (void)WriteBlob(image, GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void)WriteBlobByte(image, 0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset = TellBlob(image); (void)SetPSDSize(&psd_info, image, 0); status = WritePSDLayersInternal(image, image_info, &psd_info, &size, exception); size_offset += WritePSDSize(&psd_info, image, size + (psd_info.version == 1 ? 8 : 12), size_offset); } (void)WriteBlobMSBLong(image, 0); /* user mask data */ /* * Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression = image->compression; if (image_info->compression != UndefinedCompression) image->compression = image_info->compression; if (image->compression == ZipCompression) image->compression = RLECompression; if (WritePSDChannels(&psd_info, image_info, image, image, 0, MagickFalse, exception) == 0) status = MagickFalse; image->compression = compression; } (void)CloseBlob(image); return (status); }
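/*
  Illustrative sketch, not part of the coder: PSDPackbitsEncodeImage() above
  and DecodePSDPixels() below implement Photoshop's PackBits RLE, where a
  control byte n < 128 means "copy the next n+1 literal bytes", n > 128
  means "repeat the next byte 257-n times", and n == 128 is a no-op filler.
  The standalone helper below decodes a packed buffer under exactly those
  rules; the name packbits_decode is hypothetical and is not a MagickCore
  API.  It only needs <stddef.h> for size_t.
*/
#include <stddef.h>

static size_t packbits_decode(const unsigned char *packed,
  const size_t packed_size,unsigned char *unpacked,const size_t unpacked_size)
{
  size_t
    i,
    j,
    o,
    run;

  i=0;
  o=0;
  while ((i < packed_size) && (o < unpacked_size))
  {
    unsigned char
      control;

    control=packed[i++];
    if (control == 128)
      continue;  /* filler byte: produces no output */
    if (control > 128)
      {
        /* run packet: repeat the next byte 257-control times */
        run=257-(size_t) control;
        if ((i >= packed_size) || ((o+run) > unpacked_size))
          break;
        for (j=0; j < run; j++)
          unpacked[o++]=packed[i];
        i++;
        continue;
      }
    /* literal packet: copy the next control+1 bytes verbatim */
    run=(size_t) control+1;
    if (((i+run) > packed_size) || ((o+run) > unpacked_size))
      break;
    for (j=0; j < run; j++)
      unpacked[o++]=packed[i++];
  }
  return(o);  /* number of bytes produced */
}

/*
  For example, the packed bytes { 0xFE, 0xAA, 0x02, 0x01, 0x02, 0x03 }
  decode to { 0xAA, 0xAA, 0xAA, 0x01, 0x02, 0x03 }: 0xFE = 254 yields a run
  of 257-254 = 3 copies of 0xAA, and the control byte 0x02 copies 3 literal
  bytes.
*/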
/*
  Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/channel.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/registry.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"

/*
  Define declarations.
*/
#define MaxPSDChannels 56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)

/*
  Enumerated declarations.
*/
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
typedef struct _ChannelInfo
{
  short type;

  size_t size;
} ChannelInfo;

typedef struct _MaskInfo
{
  Image *image;

  RectangleInfo page;

  unsigned char background, flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo channel_info[MaxPSDChannels];

  char blendkey[4];

  Image *image;

  MaskInfo mask;

  Quantum opacity;

  RectangleInfo page;

  size_t offset_x, offset_y;

  unsigned char clipping, flags, name[257], visible;

  unsigned short channels;

  StringInfo *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *, Image *, ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s P S D                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsPSD() returns MagickTrue if the image format type, identified by the
%  magick string, is PSD.
%
%  The format of the IsPSD method is:
%
%      MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick, const size_t length)
{
  if (length < 4)
    return (MagickFalse);
  if (LocaleNCompare((const char *)magick, "8BPS", 4) == 0)
    return (MagickTrue);
  return (MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d P S D I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPSDImage() reads an Adobe Photoshop image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadPSDImage method is:
%
%      Image *ReadPSDImage(image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return (image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return (image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return (image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return (image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return (image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return (image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return (image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return (image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return (image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return (image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return (image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return (image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return (image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return (image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return (image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return (image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return (image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return (image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return (image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return (image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return (image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return (image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return (image->endian == LSBEndian ? "mron" : "norm");
  }
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with
  white.  This method reverts the blending.  This can be disabled by setting
  the option 'psd:alpha-unblend' to off.
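  Unblending inverts the source-over composite against white: if Photoshop
  stored value = alpha*color + (1-alpha)*white, then the original color is
  (value - (1-alpha)*QuantumRange)/alpha, which is what the loop below
  computes for every non-alpha channel.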
*/ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const char *option; MagickBooleanType status; ssize_t y; if ((image->alpha_trait != BlendPixelTrait) || (image->colorspace != sRGBColorspace)) return (MagickTrue); option = GetImageOption(image_info, "psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return (MagickTrue); status = MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma = QuantumScale * GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i = 0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image, i); if (channel != AlphaPixelChannel) q[i] = ClampToQuantum((q[i] - ((1.0 - gamma) * QuantumRange)) / gamma); } } q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image * image, Quantum opacity, MagickBooleanType revert, ExceptionInfo * exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying layer opacity %.20g", (double)opacity); if (opacity == OpaqueAlpha) return (MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel, exception); status = MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); if (q == (Quantum *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image, (Quantum) (QuantumScale * (GetPixelAlpha(image, q)) * opacity), q); else if (opacity > 0) SetPixelAlpha(image, (Quantum) (QuantumRange * (GetPixelAlpha(image, q) / (MagickRealType) opacity)), q); q += GetPixelChannels(image); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } return (status); } static MagickBooleanType ApplyPSDOpacityMask(Image * image, const Image * mask, Quantum background, MagickBooleanType revert, ExceptionInfo * exception) { Image * complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->alpha_trait == UndefinedPixelTrait) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " applying opacity mask"); complete_mask = CloneImage(image, 0, 0, MagickTrue, exception); if (complete_mask == (Image *) NULL) return (MagickFalse); complete_mask->alpha_trait = BlendPixelTrait; 
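/*
  The layer mask can be smaller than the layer and carries its own page
  geometry, so it is composited onto a full-size clone filled with the
  mask's background color; the intensity of each clone pixel then scales
  (or, on revert, un-scales) the layer's alpha.
*/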
GetPixelInfo(complete_mask, &color); color.red = (MagickRealType) background; (void)SetImageColor(complete_mask, &color, exception); status = CompositeImage(complete_mask, mask, OverCompositeOp, MagickTrue, mask->page.x - image->page.x, mask->page.y - image->page.y, exception); if (status == MagickFalse) { complete_mask = DestroyImage(complete_mask); return (status); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register Quantum * magick_restrict q; register Quantum * p; register ssize_t x; if (status == MagickFalse) continue; q = GetAuthenticPixels(image, 0, y, image->columns, 1, exception); p = GetAuthenticPixels(complete_mask, 0, y, complete_mask->columns, 1, exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha = (MagickRealType) GetPixelAlpha(image, q); intensity = GetPixelIntensity(complete_mask, p); if (revert == MagickFalse) SetPixelAlpha(image, ClampToQuantum(intensity * (QuantumScale * alpha)), q); else if (intensity > 0) SetPixelAlpha(image, ClampToQuantum((alpha / intensity) * QuantumRange), q); q += GetPixelChannels(image); p += GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image, exception) == MagickFalse) status = MagickFalse; } complete_mask = DestroyImage(complete_mask); return (status); } static void PreservePSDOpacityMask(Image * image, LayerInfo * layer_info, ExceptionInfo * exception) { char *key; RandomInfo * random_info; StringInfo * key_info; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " preserving opacity mask"); random_info = AcquireRandomInfo(); key_info = GetRandomKey(random_info, 2 + 1); key = (char *)GetStringInfoDatum(key_info); key[8] = (char)layer_info->mask.background; key[9] = '\0'; layer_info->mask.image->page.x += layer_info->page.x; layer_info->mask.image->page.y += layer_info->page.y; (void)SetImageRegistry(ImageRegistryType, (const char *)key, layer_info->mask.image, exception); (void)SetImageArtifact(layer_info->image, "psd:opacity-mask", (const char *)key); key_info = DestroyStringInfo(key_info); random_info = DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels, const ssize_t depth, const size_t number_pixels, unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets = (ssize_t) number_compact_pixels; for (i = 0; (packets > 1) && (i < (ssize_t) number_pixels);) { packets--; length = (size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length = 256 - length + 1; CheckNumberCompactPixels; pixel = (*compact_pixels++); for (j = 0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (pixel >> 7) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 6) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 5) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 4) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 3) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 2) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 1) & 0x01 ? 0U : 255U; *pixels++ = (pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (unsigned char)((pixel >> 6) & 0x03); *pixels++ = (unsigned char)((pixel >> 4) & 0x03); *pixels++ = (unsigned char)((pixel >> 2) & 0x03); *pixels++ = (unsigned char)((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++ = (unsigned char)((pixel >> 4) & 0xff); *pixels++ = (unsigned char)((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++ = (unsigned char)pixel; break; } } } continue; } length++; for (j = 0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++ = (*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++ = (*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++ = (*compact_pixels >> 6) & 0x03; *pixels++ = (*compact_pixels >> 4) & 0x03; *pixels++ = (*compact_pixels >> 2) & 0x03; *pixels++ = (*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++ = (*compact_pixels >> 4) & 0xff; *pixels++ = (*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++ = (*compact_pixels); break; } } compact_pixels++; } } return (i); } static inline LayerInfo * DestroyLayerInfo(LayerInfo * layer_info, const ssize_t number_layers) { ssize_t i; for (i = 0; i < number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image = DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image = DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image * image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return (2); } if (image->depth > 16) return (4); if (image->depth > 8) return (2); return (1); } static inline MagickSizeType GetPSDSize(const PSDInfo * psd_info, Image * image) { if (psd_info->version == 1) return ((MagickSizeType) ReadBlobLong(image)); return ((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image * image) { if (image->depth == 1) return (((image->columns + 7) / 8) * GetPSDPacketSize(image)); else return (image->columns * GetPSDPacketSize(image)); } static const char * ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image * image, ExceptionInfo * exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask = SetImageChannelMask(image, (ChannelType) (AllChannels & ~ AlphaChannel)); status = NegateImage(image, MagickFalse, exception); (void)SetImageChannelMask(image, channel_mask); return (status); } static StringInfo * ParseImageResourceBlocks(PSDInfo * psd_info, Image * image, const unsigned char *blocks, size_t length) { const unsigned 
char *p; ssize_t offset; StringInfo * profile; unsigned char name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return ((StringInfo *) NULL); profile = BlobToStringInfo((const unsigned char *)NULL, length); SetStringInfoDatum(profile, blocks); SetStringInfoName(profile, "8bim"); for (p = blocks; (p >= blocks) && (p < (blocks + length - 7));) { if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p += 4; p = PushShortPixel(MSBEndian, p, &id); p = PushCharPixel(p, &name_length); if ((name_length % 2) == 0) name_length++; p += name_length; if (p > (blocks + length - 4)) break; p = PushLongPixel(MSBEndian, p, &count); offset = (ssize_t) count; if (((p + offset) < blocks) || ((p + offset) > (blocks + length))) break; switch (id) { case 0x03ed: { unsigned short resolution; /* * Resolution info. */ if (offset < 16) break; p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.x = (double)resolution; (void)FormatImageProperty(image, "tiff:XResolution", "%*g", GetMagickPrecision(), image->resolution.x); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &resolution); image->resolution.y = (double)resolution; (void)FormatImageProperty(image, "tiff:YResolution", "%*g", GetMagickPrecision(), image->resolution.y); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushShortPixel(MSBEndian, p, &short_sans); image->units = PixelsPerInchResolution; break; } case 0x0421: { if ((offset > 4) && (*(p + 4) == 0)) psd_info->has_merged_image = MagickFalse; p += offset; break; } default: { p += offset; break; } } if ((offset & 0x01) != 0) p++; } return (profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *)NULL) return (OverCompositeOp); if (LocaleNCompare(mode, "norm", 4) == 0) return (OverCompositeOp); if (LocaleNCompare(mode, "mul ", 4) == 0) return (MultiplyCompositeOp); if (LocaleNCompare(mode, "diss", 4) == 0) return (DissolveCompositeOp); if (LocaleNCompare(mode, "diff", 4) == 0) return (DifferenceCompositeOp); if (LocaleNCompare(mode, "dark", 4) == 0) return (DarkenCompositeOp); if (LocaleNCompare(mode, "lite", 4) == 0) return (LightenCompositeOp); if (LocaleNCompare(mode, "hue ", 4) == 0) return (HueCompositeOp); if (LocaleNCompare(mode, "sat ", 4) == 0) return (SaturateCompositeOp); if (LocaleNCompare(mode, "colr", 4) == 0) return (ColorizeCompositeOp); if (LocaleNCompare(mode, "lum ", 4) == 0) return (LuminizeCompositeOp); if (LocaleNCompare(mode, "scrn", 4) == 0) return (ScreenCompositeOp); if (LocaleNCompare(mode, "over", 4) == 0) return (OverlayCompositeOp); if (LocaleNCompare(mode, "hLit", 4) == 0) return (HardLightCompositeOp); if (LocaleNCompare(mode, "sLit", 4) == 0) return (SoftLightCompositeOp); if (LocaleNCompare(mode, "smud", 4) == 0) return (ExclusionCompositeOp); if (LocaleNCompare(mode, "div ", 4) == 0) return (ColorDodgeCompositeOp); if (LocaleNCompare(mode, "idiv", 4) == 0) return (ColorBurnCompositeOp); if (LocaleNCompare(mode, "lbrn", 4) == 0) return (LinearBurnCompositeOp); if (LocaleNCompare(mode, "lddg", 4) == 0) return (LinearDodgeCompositeOp); if (LocaleNCompare(mode, "lLit", 4) == 0) return (LinearLightCompositeOp); if (LocaleNCompare(mode, "vLit", 4) == 0) return (VividLightCompositeOp); if (LocaleNCompare(mode, "pLit", 4) == 0) return (PinLightCompositeOp); if (LocaleNCompare(mode, "hMix", 4) == 0) 
return (HardMixCompositeOp); return (OverCompositeOp); } static inline ssize_t ReadPSDString(Image * image, char *p, const size_t length) { ssize_t count; count = ReadBlob(image, length, (unsigned char *)p); if ((count == (ssize_t) length) && (image->endian != MSBEndian)) { char *q; q = p + length; for (--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } return (count); } static inline void SetPSDPixel(Image * image, const size_t channels, const ssize_t type, const size_t packet_size, const Quantum pixel, Quantum * q, ExceptionInfo * exception) { if (image->storage_class == PseudoClass) { PixelInfo * color; Quantum index; index = pixel; if (packet_size == 1) index = (Quantum) ScaleQuantumToChar(index); index = (Quantum) ConstrainColormapIndex(image, (ssize_t) index, exception); if (type == 0) SetPixelIndex(image, index, q); if ((type == 0) && (channels > 1)) return; color = image->colormap + (ssize_t) GetPixelIndex(image, q); if (type != 0) color->alpha = (MagickRealType) pixel; SetPixelViaPixelInfo(image, color, q); return; } switch (type) { case -1: { SetPixelAlpha(image, pixel, q); break; } case -2: case 0: { SetPixelRed(image, pixel, q); break; } case -3: case 1: { SetPixelGreen(image, pixel, q); break; } case -4: case 2: { SetPixelBlue(image, pixel, q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image, pixel, q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image, pixel, q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image * image, const size_t channels, const ssize_t row, const ssize_t type, const unsigned char *pixels, ExceptionInfo * exception) { Quantum pixel; register const unsigned char *p; register Quantum * q; register ssize_t x; size_t packet_size; p = pixels; q = GetAuthenticPixels(image, 0, row, image->columns, 1, exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size = GetPSDPacketSize(image); for (x = 0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel = ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p = PushShortPixel(MSBEndian, p, &nibble); pixel = ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p = PushFloatPixel(MSBEndian, p, &nibble); pixel = ClampToQuantum((MagickRealType) (QuantumRange * nibble)); } if (image->depth > 1) { SetPSDPixel(image, channels, type, packet_size, pixel, q, exception); q += GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits = (ssize_t) image->columns - x; if (number_bits > 8) number_bits = 8; for (bit = 0; bit < (ssize_t) number_bits; bit++) { SetPSDPixel(image, channels, type, packet_size, (((unsigned char)pixel) & (0x01 << (7 - bit))) != 0 ? 
0 : QuantumRange, q, exception); q += GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return (SyncAuthenticPixels(image, exception)); } static MagickBooleanType ReadPSDChannelRaw(Image * image, const size_t channels, const ssize_t type, ExceptionInfo * exception) { MagickBooleanType status; size_t row_size; ssize_t count, y; unsigned char *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RAW"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); (void)memset(pixels, 0, row_size * sizeof(*pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, channels, y, type, pixels, exception); if (status == MagickFalse) break; } pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } static inline MagickOffsetType * ReadPSDRLESizes(Image * image, const PSDInfo * psd_info, const size_t size) { MagickOffsetType * sizes; ssize_t y; sizes = (MagickOffsetType *) AcquireQuantumMemory(size, sizeof(*sizes)); if (sizes != (MagickOffsetType *) NULL) { for (y = 0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y] = (MagickOffsetType) ReadBlobShort(image); else sizes[y] = (MagickOffsetType) ReadBlobLong(image); } } return sizes; } static MagickBooleanType ReadPSDChannelRLE(Image * image, const PSDInfo * psd_info, const ssize_t type, MagickOffsetType * sizes, ExceptionInfo * exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is RLE compressed"); row_size = GetPSDRowSize(image); pixels = (unsigned char *)AcquireQuantumMemory(row_size, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); length = 0; for (y = 0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length = (size_t) sizes[y]; if (length > (row_size + 2048)) /* arbitrary number */ { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "InvalidLength", image->filename); } compact_pixels = (unsigned char *)AcquireQuantumMemory(length, sizeof(*pixels)); if (compact_pixels == (unsigned char *)NULL) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(compact_pixels, 0, length * sizeof(*compact_pixels)); status = MagickTrue; for (y = 0; y < (ssize_t) image->rows; y++) { status = MagickFalse; count = ReadBlob(image, (size_t) sizes[y], compact_pixels); if (count != (ssize_t) sizes[y]) break; count = DecodePSDPixels((size_t) sizes[y], compact_pixels, (ssize_t) (image->depth == 1 ? 
123456 : image->depth), row_size, pixels); if (count != (ssize_t) row_size) break; status = ReadPSDChannelPixels(image, psd_info->channels, y, type, pixels, exception); if (status == MagickFalse) break; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static void Unpredict8Bit(const Image * image, unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p = pixels; remaining = count; while (remaining > 0) { length = image->columns; while (--length) { *(p + 1) += *p; p++; } p++; remaining -= row_size; } } static void Unpredict16Bit(const Image * image, unsigned char *pixels, const size_t count, const size_t row_size) { register unsigned char *p; size_t length, remaining; p = pixels; remaining = count; while (remaining > 0) { length = image->columns; while (--length) { p[2] += p[0] + ((p[1] + p[3]) >> 8); p[3] += p[1]; p += 2; } p += 2; remaining -= row_size; } } static void Unpredict32Bit(const Image * image, unsigned char *pixels, unsigned char *output_pixels, const size_t row_size) { register unsigned char *p, *q; register ssize_t y; size_t offset1, offset2, offset3, remaining; unsigned char *start; offset1 = image->columns; offset2 = 2 * offset1; offset3 = 3 * offset1; p = pixels; q = output_pixels; for (y = 0; y < (ssize_t) image->rows; y++) { start = p; remaining = row_size; while (--remaining) { *(p + 1) += *p; p++; } p = start; remaining = image->columns; while (remaining--) { *(q++) = *p; *(q++) = *(p + offset1); *(q++) = *(p + offset2); *(q++) = *(p + offset3); p++; } p = start + row_size; } } static MagickBooleanType ReadPSDChannelZip(Image * image, const size_t channels, const ssize_t type, const PSDCompressionType compression, const size_t compact_size, ExceptionInfo * exception) { MagickBooleanType status; register unsigned char *p; size_t count, packet_size, row_size; register ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); compact_pixels = (unsigned char *)AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); packet_size = GetPSDPacketSize(image); row_size = image->columns * packet_size; count = image->rows * row_size; pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*pixels)); if (pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (ReadBlob(image, compact_size, compact_pixels) != (ssize_t) compact_size) { pixels = (unsigned char *)RelinquishMagickMemory(pixels); compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } memset(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; stream.next_in = (Bytef *) compact_pixels; stream.avail_in = (uInt) compact_size; stream.next_out = (Bytef *) pixels; stream.avail_out = (uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret = 
inflate(&stream, Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void)inflateEnd(&stream); compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (MagickFalse); } if (ret == Z_STREAM_END) break; } (void)inflateEnd(&stream); } if (compression == ZipWithPrediction) { if (packet_size == 1) Unpredict8Bit(image, pixels, count, row_size); else if (packet_size == 2) Unpredict16Bit(image, pixels, count, row_size); else if (packet_size == 4) { unsigned char *output_pixels; output_pixels = (unsigned char *)AcquireQuantumMemory(count, sizeof(*output_pixels)); if (pixels == (unsigned char *)NULL) { compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } Unpredict32Bit(image, pixels, output_pixels, row_size); pixels = (unsigned char *)RelinquishMagickMemory(pixels); pixels = output_pixels; } } status = MagickTrue; p = pixels; for (y = 0; y < (ssize_t) image->rows; y++) { status = ReadPSDChannelPixels(image, channels, y, type, p, exception); if (status == MagickFalse) break; p += row_size; } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); pixels = (unsigned char *)RelinquishMagickMemory(pixels); return (status); } #endif static MagickBooleanType ReadPSDChannel(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, const size_t channel, const PSDCompressionType compression, ExceptionInfo * exception) { Image * channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image = image; mask = (Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* * Ignore mask that is not a user supplied layer mask, if the mask is * disabled or if the flags have unsupported values. 
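  In the layer-mask record, flag bit 1 (0x02) marks the mask as disabled;
  only a type -2 user-supplied mask with flag values <= 2 is honored, and a
  disabled mask is read only when 'psd:preserve-opacity-mask' is turned on.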
*/ option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { (void)SeekBlob(image, (MagickOffsetType) layer_info->channel_info[channel].size - 2, SEEK_CUR); return (MagickTrue); } mask = CloneImage(image, layer_info->mask.page.width, layer_info->mask.page.height, MagickFalse, exception); if (mask != (Image *) NULL) { (void)ResetImagePixels(mask, exception); (void)SetImageType(mask, GrayscaleType, exception); channel_image = mask; } } offset = TellBlob(image); status = MagickFalse; switch (compression) { case Raw: status = ReadPSDChannelRaw(channel_image, psd_info->channels, (ssize_t) layer_info->channel_info[channel].type, exception); break; case RLE: { MagickOffsetType * sizes; sizes = ReadPSDRLESizes(channel_image, psd_info, channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); status = ReadPSDChannelRLE(channel_image, psd_info, (ssize_t) layer_info->channel_info[channel].type, sizes, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status = ReadPSDChannelZip(channel_image, layer_info->channels, (ssize_t) layer_info->channel_info[channel].type, compression, layer_info->channel_info[channel].size - 2, exception); #else (void)ThrowMagickException(exception, GetMagickModule(), MissingDelegateWarning, "DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)", image->filename); #endif break; default: (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); break; } (void)SeekBlob(image, offset + layer_info->channel_info[channel].size - 2, SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) (void)DestroyImage(mask); ThrowBinaryException(CoderError, "UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image = DestroyImage(layer_info->mask.image); layer_info->mask.image = mask; } return (status); } static MagickBooleanType ReadPSDLayer(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, LayerInfo * layer_info, ExceptionInfo * exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void)SetImageBackgroundColor(layer_info->image, exception); layer_info->image->compose = PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose = NoCompositeOp; /* * Set up some hidden attributes for folks that need them. 
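  The layer's page offsets and opacity become the image artifacts
  psd:layer.x, psd:layer.y, and psd:layer.opacity, and the layer name is
  stored in the standard 'label' property.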
*/ (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.x); (void)SetImageArtifact(layer_info->image, "psd:layer.x", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double)layer_info->page.y); (void)SetImageArtifact(layer_info->image, "psd:layer.y", message); (void)FormatLocaleString(message, MagickPathExtent, "%.20g", (double) layer_info->opacity); (void)SetImageArtifact(layer_info->image, "psd:layer.opacity", message); (void)SetImageProperty(layer_info->image, "label", (char *)layer_info->name, exception); status = MagickTrue; for (j = 0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for channel %.20g", (double)j); compression = (PSDCompressionType) ReadBlobShort(layer_info->image); layer_info->image->compression = ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait = BlendPixelTrait; status = ReadPSDChannel(layer_info->image, image_info, psd_info, layer_info, (size_t) j, compression, exception); if (status == MagickFalse) break; } if (status != MagickFalse) status = ApplyPSDLayerOpacity(layer_info->image, layer_info->opacity, MagickFalse, exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status = NegateCMYK(layer_info->image, exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x = layer_info->mask.page.x; layer_info->mask.image->page.y = layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose = NoCompositeOp; else status = ApplyPSDOpacityMask(layer_info->image, layer_info->mask.image, layer_info->mask.background == 0 ? 
0 : QuantumRange, MagickFalse, exception); option = GetImageOption(image_info, "psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image, layer_info, exception); layer_info->mask.image = DestroyImage(layer_info->mask.image); } return (status); } static MagickBooleanType CheckPSDChannels(const PSDInfo * psd_info, LayerInfo * layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return (MagickFalse); channel_type = RedChannel; if (psd_info->min_channels >= 3) channel_type |= (GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type |= BlackChannel; for (i = 0; i < (ssize_t) layer_info->channels; i++) { short type; type = layer_info->channel_info[i].type; if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0)) return (MagickFalse); if (type == -1) { channel_type |= AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type &= ~RedChannel; else if (type == 1) channel_type &= ~GreenChannel; else if (type == 2) channel_type &= ~BlueChannel; else if (type == 3) channel_type &= ~BlackChannel; } if (channel_type == 0) return (MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return (MagickTrue); return (MagickFalse); } static void AttachPSDLayers(Image * image, LayerInfo * layer_info, ssize_t number_layers) { register ssize_t i; ssize_t j; for (i = 0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j = i; j < number_layers - 1; j++) layer_info[j] = layer_info[j + 1]; number_layers--; i--; } } if (number_layers == 0) { layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); return; } for (i = 0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous = layer_info[i - 1].image; if (i < (number_layers - 1)) layer_info[i].image->next = layer_info[i + 1].image; layer_info[i].image->page = layer_info[i].page; } image->next = layer_info[0].image; layer_info[0].image->previous = image; layer_info = (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline MagickBooleanType PSDSkipImage(const PSDInfo * psd_info, const ImageInfo * image_info, const size_t index) { if (psd_info->has_merged_image == MagickFalse) return (MagickFalse); if (image_info->number_scenes == 0) return (MagickFalse); if (index < image_info->scene) return (MagickTrue); if (index > image_info->scene + image_info->number_scenes - 1) return (MagickTrue); return (MagickFalse); } static void CheckMergedImageAlpha(const PSDInfo * psd_info, Image * image) { /* * The number of layers cannot be used to determine if the merged image * contains an alpha channel. So we enable it when we think we should. 
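* For example, a grayscale file with two channels, or an RGB file with four, is assumed to carry an extra alpha channel.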
*/ if (((psd_info->mode == GrayscaleMode) && (psd_info->channels > 1)) || ((psd_info->mode == RGBMode) && (psd_info->channels > 3)) || ((psd_info->mode == CMYKMode) && (psd_info->channels > 4))) image->alpha_trait = BlendPixelTrait; } static void ParseAdditionalInfo(LayerInfo * layer_info) { char key[5]; size_t remaining_length; unsigned char *p; unsigned int size; p = GetStringInfoDatum(layer_info->info); remaining_length = GetStringInfoLength(layer_info->info); while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (char)(*p++); key[1] = (char)(*p++); key[2] = (char)(*p++); key[3] = (char)(*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) break; if (LocaleNCompare(key, "luni", sizeof(key)) == 0) { unsigned char *name; unsigned int length; length = (unsigned int)(*p++) << 24; length |= (unsigned int)(*p++) << 16; length |= (unsigned int)(*p++) << 8; length |= (unsigned int)(*p++); if (length * 2 > size - 4) break; if (sizeof(layer_info->name) <= length) break; name = layer_info->name; while (length > 0) { /* Only ASCII strings are supported */ if (*p++ != '\0') break; *name++ = *p++; length--; } if (length == 0) *name = '\0'; break; } else p += size; remaining_length -= (size_t) size; } } static MagickSizeType GetLayerInfoSize(const PSDInfo * psd_info, Image * image) { char type[4]; MagickSizeType size; ssize_t count; size = GetPSDSize(psd_info, image); if (size != 0) return (size); (void)ReadBlobLong(image); count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (0); count = ReadPSDString(image, type, 4); if ((count == 4) && ((LocaleNCompare(type, "Mt16", 4) == 0) || (LocaleNCompare(type, "Mt32", 4) == 0) || (LocaleNCompare(type, "Mtrn", 4) == 0))) { size = GetPSDSize(psd_info, image); if (size != 0) return (0); image->alpha_trait = BlendPixelTrait; count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) return (0); count = ReadPSDString(image, type, 4); } if ((count == 4) && ((LocaleNCompare(type, "Lr16", 4) == 0) || (LocaleNCompare(type, "Lr32", 4) == 0))) size = GetPSDSize(psd_info, image); return (size); } static MagickBooleanType ReadPSDLayersInternal(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, const MagickBooleanType skip_layers, ExceptionInfo * exception) { char type[4]; LayerInfo * layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, index, j, number_layers; size = GetLayerInfoSize(psd_info, image); if (size == 0) { CheckMergedImageAlpha(psd_info, image); return (MagickTrue); } layer_info = (LayerInfo *) NULL; number_layers = (ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* * The first alpha channel in the merged result contains the * transparency data for the merged result. 
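* Photoshop signals this case by storing the layer count as a negative number; the absolute value taken below is the real count.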
*/ number_layers = MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " negative layer count corrected for"); image->alpha_trait = BlendPixelTrait; } /* * We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return (MagickTrue); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image contains %.20g layers", (double)number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError, "InvalidNumberOfLayers", image->filename); layer_info = (LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } (void)memset(layer_info, 0, (size_t) number_layers * sizeof(*layer_info)); for (i = 0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading layer #%.20g", (double)i + 1); top = (ssize_t) ReadBlobSignedLong(image); left = (ssize_t) ReadBlobSignedLong(image); bottom = (ssize_t) ReadBlobSignedLong(image); right = (ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } layer_info[i].page.y = top; layer_info[i].page.x = left; layer_info[i].page.width = (size_t) (right - left); layer_info[i].page.height = (size_t) (bottom - top); layer_info[i].channels = ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double)layer_info[i].page.x, (double)layer_info[i].page.y, (double)layer_info[i].page.height, (double) layer_info[i].page.width, (double)layer_info[i].channels); for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type = (short)ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size = (size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g", (double)j, (double)layer_info[i].channel_info[j].type, (double)layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info, &layer_info[i]) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadPSDString(image, type, 4); if ((count != 4) || (LocaleNCompare(type, "8BIM", 4) != 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } count = ReadPSDString(image, 
layer_info[i].blendkey, 4); if (count != 4) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "ImproperImageHeader", image->filename); } layer_info[i].opacity = (Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping = (unsigned char)ReadBlobByte(image); layer_info[i].flags = (unsigned char)ReadBlobByte(image); layer_info[i].visible = !(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey, (double)layer_info[i].opacity, layer_info[i].clipping ? "true" : "false", layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void)ReadBlobByte(image); /* filler */ size = ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer contains additional info"); length = ReadBlobLong(image); combined_length = length + 4; if (length != 0) { /* * Layer mask info. */ layer_info[i].mask.page.y = (ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x = (ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height = (size_t) (ReadBlobSignedLong(image) - layer_info[i].mask.page.y); layer_info[i].mask.page.width = (size_t) ( ReadBlobSignedLong(image) - layer_info[i].mask.page.x); layer_info[i].mask.background = (unsigned char)ReadBlobByte( image); layer_info[i].mask.flags = (unsigned char)ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y = layer_info[i].mask.page.y - layer_info[i].page.y; layer_info[i].mask.page.x = layer_info[i].mask.page.x - layer_info[i].page.x; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double)layer_info[i].mask.page.x, (double) layer_info[i].mask.page.y, (double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height, (double)((MagickOffsetType) length) - 18); /* * Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image, (MagickSizeType) (length - 18)) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = ReadBlobLong(image); combined_length += length + 4; if (length != 0) { /* * Layer blending ranges info. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer blending ranges: length=%.20g", (double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } /* * Layer name. 
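* The name is stored as a Pascal string: a length byte followed by the characters, with the whole field padded to a multiple of 4 bytes; the padding is skipped below.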
*/ length = (MagickSizeType) (unsigned char)ReadBlobByte(image); combined_length += length + 1; if (length > 0) (void)ReadBlob(image, (size_t) length++, layer_info[i].name); layer_info[i].name[length] = '\0'; if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer name: %s", layer_info[i].name); if ((length % 4) != 0) { length = 4 - (length % 4); combined_length += length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image, length) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } length = (MagickSizeType) size - combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile", image->filename); } layer_info[i].info = AcquireStringInfo((const size_t)length); info = GetStringInfoDatum(layer_info[i].info); (void)ReadBlob(image, (const size_t)length, info); ParseAdditionalInfo(&layer_info[i]); } } } for (i = 0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info = DestroyStringInfo(layer_info[i].info); continue; } /* * Allocate layered image. */ layer_info[i].image = CloneImage(image, layer_info[i].page.width, layer_info[i].page.height, MagickFalse, exception); if (layer_info[i].image == (Image *) NULL) { layer_info = DestroyLayerInfo(layer_info, number_layers); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " allocation of image for layer %.20g failed", (double)i); ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void)SetImageProfile(layer_info[i].image, "psd:additional-info", layer_info[i].info, exception); layer_info[i].info = DestroyStringInfo(layer_info[i].info); } } if (image_info->ping != MagickFalse) { AttachPSDLayers(image, layer_info, number_layers); return (MagickTrue); } status = MagickTrue; index = 0; for (i = 0; i < number_layers; i++) { if ((layer_info[i].image == (Image *) NULL) || (PSDSkipImage(psd_info, image_info, ++index) != MagickFalse)) { for (j = 0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image, (MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info = DestroyLayerInfo(layer_info, number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile", image->filename); } } continue; } if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading data for layer %.20g", (double)i); status = ReadPSDLayer(image, image_info, psd_info, &layer_info[i], exception); if (status == MagickFalse) break; status = SetImageProgress(image, LoadImagesTag, (MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } if (status != MagickFalse) AttachPSDLayers(image, layer_info, number_layers); else layer_info = DestroyLayerInfo(layer_info, number_layers); return (status); } ModuleExport MagickBooleanType ReadPSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, ExceptionInfo * exception) { PolicyDomain domain; PolicyRights rights; domain = CoderPolicyDomain; rights = 
ReadPolicyRights; if (IsRightsAuthorized(domain, rights, "PSD") == MagickFalse) return (MagickTrue); return (ReadPSDLayersInternal(image, image_info, psd_info, MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo * image_info, Image * image, const PSDInfo * psd_info, ExceptionInfo * exception) { MagickOffsetType * sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; if ((image_info->number_scenes != 0) && (image_info->scene != 0)) return (MagickTrue); compression = (PSDCompressionType) ReadBlobMSBShort(image); image->compression = ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void)ThrowMagickException(exception, GetMagickModule(), TypeWarning, "CompressionNotSupported", "'%.20g'", (double)compression); return (MagickFalse); } sizes = (MagickOffsetType *) NULL; if (compression == RLE) { sizes = ReadPSDRLESizes(image, psd_info, image->rows * psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); } status = MagickTrue; for (i = 0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type = i; if ((type == 1) && (psd_info->channels == 2)) type = -1; if (compression == RLE) status = ReadPSDChannelRLE(image, psd_info, type, sizes + (i * image->rows), exception); else status = ReadPSDChannelRaw(image, psd_info->channels, type, exception); if (status != MagickFalse) status = SetImageProgress(image, LoadImagesTag, (MagickOffsetType) i, psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status = NegateCMYK(image, exception); if (status != MagickFalse) status = CorrectPSDAlphaBlend(image_info, image, exception); sizes = (MagickOffsetType *) RelinquishMagickMemory(sizes); return (status); } static Image * ReadPSDImage(const ImageInfo * image_info, ExceptionInfo * exception) { Image * image; MagickBooleanType skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t image_list_length; ssize_t count; StringInfo * profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image = AcquireImage(image_info, exception); status = OpenBlob(image_info, image, ReadBinaryBlobMode, exception); if (status == MagickFalse) { image = DestroyImageList(image); return ((Image *) NULL); } /* * Read image header. 
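* The 26-byte header read below is laid out as a 4-byte "8BPS" signature, a 2-byte version (1 = PSD, 2 = PSB), 6 reserved bytes, a 2-byte channel count, 4-byte row and column counts, a 2-byte depth and a 2-byte color mode, all big-endian.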
*/ image->endian = MSBEndian; count = ReadBlob(image, 4, (unsigned char *)psd_info.signature); psd_info.version = ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature, "8BPS", 4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); (void)ReadBlob(image, 6, psd_info.reserved); psd_info.channels = ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError, "MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError, "MaximumChannelsExceeded"); psd_info.rows = ReadBlobMSBLong(image); psd_info.columns = ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.depth = ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.mode = ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double)psd_info.columns, (double)psd_info.rows, (double) psd_info.channels, (double)psd_info.depth, ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); /* * Initialize image. */ image->depth = psd_info.depth; image->columns = psd_info.columns; image->rows = psd_info.rows; status = SetImageExtent(image, image->columns, image->rows, exception); if (status == MagickFalse) return (DestroyImageList(image)); status = ResetImagePixels(image, exception); if (status == MagickFalse) return (DestroyImageList(image)); psd_info.min_channels = 3; if (psd_info.mode == LabMode) (void)SetImageColorspace(image, LabColorspace, exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels = 4; (void)SetImageColorspace(image, CMYKColorspace, exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status = AcquireImageColormap(image, MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize), exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels = 1; (void)SetImageColorspace(image, GRAYColorspace, exception); } else if (psd_info.mode == IndexedMode) psd_info.min_channels = 1; if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); /* * Read PSD raster colormap only present for indexed and duotone images. */ length = ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* * Duotone image data; the format of this data is undocumented. * 32 bits per pixel; the colormap is ignored. 
*/ (void)SeekBlob(image, (const MagickOffsetType)length, SEEK_CUR); } else { size_t number_colors; /* * Read PSD raster colormap. */ number_colors = (size_t) length / 3; if (number_colors > 65536) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); if (AcquireImageColormap(image, number_colors, exception) == MagickFalse) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].red = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].green = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); for (i = 0; i < (ssize_t) image->colors; i++) image->colormap[i].blue = (MagickRealType) ScaleCharToQuantum( (unsigned char)ReadBlobByte(image)); image->alpha_trait = UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); psd_info.has_merged_image = MagickTrue; profile = (StringInfo *) NULL; length = ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* * Image resources block. */ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading image resource blocks - %.20g bytes", (double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); blocks = (unsigned char *)AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *)NULL) ThrowReaderException(ResourceLimitError, "MemoryAllocationFailed"); count = ReadBlob(image, (size_t) length, blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *)blocks, "8BIM", 4) != 0)) { blocks = (unsigned char *)RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } profile = ParseImageResourceBlocks(&psd_info, image, blocks, (size_t) length); blocks = (unsigned char *)RelinquishMagickMemory(blocks); } /* * Layer and mask block. */ length = GetPSDSize(&psd_info, image); if (length == 8) { length = ReadBlobMSBLong(image); length = ReadBlobMSBLong(image); } offset = TellBlob(image); skip_layers = MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (psd_info.has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " read composite only"); skip_layers = MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image, image_info, &psd_info, skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } /* * Skip the rest of the layer and mask information. */ (void)SeekBlob(image, offset + length, SEEK_SET); } /* * If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); ThrowReaderException(CorruptImageError, "UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * Read the precombined layer, present for PSD < 4 compatibility. 
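* When this composite is missing, the reader falls back to decoding the individual layers and flattening them itself (see below).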
*/ if (image->debug != MagickFalse) (void)LogMagickEvent(CoderEvent, GetMagickModule(), " reading the precombined layer"); image_list_length = GetImageListLength(image); if ((psd_info.has_merged_image != MagickFalse) || (image_list_length == 1)) psd_info.has_merged_image = (MagickBooleanType) ReadPSDMergedImage( image_info, image, &psd_info, exception); if ((psd_info.has_merged_image == MagickFalse) && (image_list_length == 1) && (length != 0)) { (void)SeekBlob(image, offset, SEEK_SET); status = ReadPSDLayersInternal(image, image_info, &psd_info, MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } image_list_length = GetImageListLength(image); } if (psd_info.has_merged_image == MagickFalse) { Image * merged; if (image_list_length == 1) { if (profile != (StringInfo *) NULL) profile = DestroyStringInfo(profile); ThrowReaderException(CorruptImageError, "InsufficientImageDataInFile"); } image->background_color.alpha = (MagickRealType) TransparentAlpha; image->background_color.alpha_trait = BlendPixelTrait; (void)SetImageBackgroundColor(image, exception); merged = MergeImageLayers(image, FlattenLayer, exception); if (merged == (Image *) NULL) { (void)CloseBlob(image); image = DestroyImageList(image); return ((Image *) NULL); } ReplaceImageInList(&image, merged); } if (profile != (StringInfo *) NULL) { Image * next; i = 0; next = image; while (next != (Image *) NULL) { if (PSDSkipImage(&psd_info, image_info, i++) == MagickFalse) (void)SetImageProfile(next, GetStringInfoName(profile), profile, exception); next = next->next; } profile = DestroyStringInfo(profile); } (void)CloseBlob(image); return (GetFirstImageInList(image)); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % R e g i s t e r P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % RegisterPSDImage() adds properties for the PSD image format to % * the list of supported formats. The properties include the image format % * tag, a method to read and/or write the format, whether the format % * supports the saving of more than one frame to the same file or blob, % * whether the format supports native in-memory I/O, and a brief % * description of the format. 
% % The format of the RegisterPSDImage method * is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo * entry; entry = AcquireMagickInfo("PSD", "PSB", "Adobe Large Document Format"); entry->decoder = (DecodeImageHandler *) ReadPSDImage; entry->encoder = (EncodeImageHandler *) WritePSDImage; entry->magick = (IsImageFormatHandler *) IsPSD; entry->flags |= CoderDecoderSeekableStreamFlag; entry->flags |= CoderEncoderSeekableStreamFlag; (void)RegisterMagickInfo(entry); entry = AcquireMagickInfo("PSD", "PSD", "Adobe Photoshop bitmap"); entry->decoder = (DecodeImageHandler *) ReadPSDImage; entry->encoder = (EncodeImageHandler *) WritePSDImage; entry->magick = (IsImageFormatHandler *) IsPSD; entry->flags |= CoderDecoderSeekableStreamFlag; entry->flags |= CoderEncoderSeekableStreamFlag; (void)RegisterMagickInfo(entry); return (MagickImageCoderSignature); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % U n r e g i s t e r P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % UnregisterPSDImage() removes format registrations made by the % * PSD module from the list of supported formats. % % The format of the * UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void)UnregisterMagickInfo("PSB"); (void)UnregisterMagickInfo("PSD"); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % W r i t e P S D I m a g e * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded * image format. % % The format of the WritePSDImage method is: % % * MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, * % ExceptionInfo *exception) % % A description of each parameter * follows. % % o image_info: the image info. % % o image: The image. * % % o exception: return any errors or warnings in this structure. 
% */ static inline ssize_t SetPSDOffset(const PSDInfo * psd_info, Image * image, const size_t offset) { if (psd_info->version == 1) return (WriteBlobMSBShort(image, (unsigned short)offset)); return (WriteBlobMSBLong(image, (unsigned int)offset)); } static inline ssize_t WritePSDOffset(const PSDInfo * psd_info, Image * image, const MagickSizeType size, const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset = TellBlob(image); (void)SeekBlob(image, offset, SEEK_SET); if (psd_info->version == 1) result = WriteBlobMSBShort(image, (unsigned short)size); else result = WriteBlobMSBLong(image, (unsigned int)size); (void)SeekBlob(image, current_offset, SEEK_SET); return (result); } static inline ssize_t SetPSDSize(const PSDInfo * psd_info, Image * image, const MagickSizeType size) { if (psd_info->version == 1) return (WriteBlobLong(image, (unsigned int)size)); return (WriteBlobLongLong(image, size)); } static inline ssize_t WritePSDSize(const PSDInfo * psd_info, Image * image, const MagickSizeType size, const MagickOffsetType offset) { MagickOffsetType current_offset; ssize_t result; current_offset = TellBlob(image); (void)SeekBlob(image, offset, SEEK_SET); result = SetPSDSize(psd_info, image, size); (void)SeekBlob(image, current_offset, SEEK_SET); return (result); } static size_t PSDPackbitsEncodeImage(Image * image, const size_t length, const unsigned char *pixels, unsigned char *compact_pixels, ExceptionInfo * exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* * Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(pixels != (unsigned char *)NULL); assert(compact_pixels != (unsigned char *)NULL); packbits = (unsigned char *)AcquireQuantumMemory(128UL, sizeof(*packbits)); if (packbits == (unsigned char *)NULL) ThrowBinaryException(ResourceLimitError, "MemoryAllocationFailed", image->filename); q = compact_pixels; for (i = (ssize_t) length; i != 0;) { switch (i) { case 1: { i--; *q++ = (unsigned char)0; *q++ = (*pixels); break; } case 2: { i -= 2; *q++ = (unsigned char)1; *q++ = (*pixels); *q++ = pixels[1]; break; } case 3: { i -= 3; if ((*pixels == *(pixels + 1)) && (*(pixels + 1) == *(pixels + 2))) { *q++ = (unsigned char)((256 - 3) + 1); *q++ = (*pixels); break; } *q++ = (unsigned char)2; *q++ = (*pixels); *q++ = pixels[1]; *q++ = pixels[2]; break; } default: { if ((*pixels == *(pixels + 1)) && (*(pixels + 1) == *(pixels + 2))) { /* * Packed run. */ count = 3; while (((ssize_t) count < i) && (*pixels == *(pixels + count))) { count++; if (count >= 127) break; } i -= count; *q++ = (unsigned char)((256 - count) + 1); *q++ = (*pixels); pixels += count; break; } /* * Literal run. 
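* In the PackBits scheme used here, a header byte n in [0, 127] announces n + 1 literal bytes, while the packed-run branch above emits (256 - count) + 1, the unsigned-char encoding of -(count - 1), to request count copies of a single byte; e.g. the bytes AAAB compress to 0xFE 'A' 0x00 'B'.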
*/ count = 0; while ((*(pixels + count) != *(pixels + count + 1)) || (*(pixels + count + 1) != *(pixels + count + 2))) { packbits[count + 1] = pixels[count]; count++; if (((ssize_t) count >= (i - 3)) || (count >= 127)) break; } i -= count; *packbits = (unsigned char)(count - 1); for (j = 0; j <= (ssize_t) count; j++) *q++ = packbits[j]; pixels += count; break; } } } *q++ = (unsigned char)128; /* EOD marker */ packbits = (unsigned char *)RelinquishMagickMemory(packbits); return ((size_t) (q - compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo * psd_info, Image * image, const Image * next_image, const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length = (size_t) WriteBlobShort(image, RLE); for (i = 0; i < channels; i++) for (y = 0; y < (ssize_t) next_image->rows; y++) length += SetPSDOffset(psd_info, image, 0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length = (size_t) WriteBlobShort(image, ZipWithoutPrediction); #endif else length = (size_t) WriteBlobShort(image, Raw); return (length); } static size_t WritePSDChannel(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset, const MagickBooleanType separate, const CompressionType compression, ExceptionInfo * exception) { MagickBooleanType monochrome; QuantumInfo * quantum_info; register const Quantum * p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels = (unsigned char *)NULL; flush = Z_NO_FLUSH; #endif count = 0; if (separate != MagickFalse) { size_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, compression, 1); } if (next_image->depth > 8) next_image->depth = 16; monochrome = IsImageMonochrome(image) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info = AcquireQuantumInfo(image_info, next_image); if (quantum_info == (QuantumInfo *) NULL) return (0); pixels = (unsigned char *)GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels = (unsigned char *)AcquireQuantumMemory( MagickMinBufferExtent, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *)NULL) { quantum_info = DestroyQuantumInfo(quantum_info); return (0); } memset(&stream, 0, sizeof(stream)); stream.data_type = Z_BINARY; level = Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level = (int)image_info->quality; if (deflateInit(&stream, level) != Z_OK) { quantum_info = DestroyQuantumInfo(quantum_info); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); return (0); } } #endif for (y = 0; y < (ssize_t) next_image->rows; y++) { p = GetVirtualPixels(next_image, 0, y, next_image->columns, 1, exception); if (p == (const Quantum *)NULL) break; length = ExportQuantumPixels(next_image, (CacheView *) NULL, quantum_info, quantum_type, pixels, exception); if (monochrome != MagickFalse) for (i = 0; i < (ssize_t) length; i++) pixels[i] = (~pixels[i]); if (compression == RLECompression) { length = PSDPackbitsEncodeImage(image, length, pixels, compact_pixels, exception); count += WriteBlob(image, length, compact_pixels); size_offset += WritePSDOffset(psd_info, image, length, size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in = (uInt) length; stream.next_in = (Bytef *) pixels; if (y == (ssize_t) next_image->rows - 1) flush = Z_FINISH; do { stream.avail_out = (uInt) MagickMinBufferExtent; stream.next_out = (Bytef *) compressed_pixels; if (deflate(&stream, flush) == Z_STREAM_ERROR) break; length = (size_t) MagickMinBufferExtent - stream.avail_out; if (length > 0) count += WriteBlob(image, length, compressed_pixels); } while (stream.avail_out == 0); } #endif else count += WriteBlob(image, length, pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void)deflateEnd(&stream); compressed_pixels = (unsigned char *)RelinquishMagickMemory( compressed_pixels); } #endif quantum_info = DestroyQuantumInfo(quantum_info); return (count); } static unsigned char * AcquireCompactPixels(const Image * image, ExceptionInfo * exception) { size_t packet_size; unsigned char *compact_pixels; packet_size = image->depth > 8UL ? 
2UL : 1UL; compact_pixels = (unsigned char *)AcquireQuantumMemory((9 * image->columns) + 1, packet_size * sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *)NULL) { (void)ThrowMagickException(exception, GetMagickModule(), ResourceLimitError, "MemoryAllocationFailed", "`%s'", image->filename); } return (compact_pixels); } static size_t WritePSDChannels(const PSDInfo * psd_info, const ImageInfo * image_info, Image * image, Image * next_image, MagickOffsetType size_offset, const MagickBooleanType separate, ExceptionInfo * exception) { CompressionType compression; Image * mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count = 0; offset_length = 0; rows_offset = 0; compact_pixels = (unsigned char *)NULL; compression = next_image->compression; if (image_info->compression != UndefinedCompression) compression = image_info->compression; if (compression == RLECompression) { compact_pixels = AcquireCompactPixels(next_image, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } channels = 1; if (separate == MagickFalse) { if ((next_image->storage_class != PseudoClass) || (IsImageGray(next_image) != MagickFalse)) { if (IsImageGray(next_image) == MagickFalse) channels = (size_t) (next_image->colorspace == CMYKColorspace ? 4 : 3); if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset = TellBlob(image) + 2; count += WriteCompressionStart(psd_info, image, next_image, compression, (ssize_t) channels); offset_length = (next_image->rows * (psd_info->version == 1 ? 2 : 4)); } size_offset += 2; if ((next_image->storage_class == PseudoClass) && (IsImageGray(next_image) == MagickFalse)) { length = WritePSDChannel(psd_info, image_info, image, next_image, IndexQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (IsImageGray(next_image) != MagickFalse) { length = WritePSDChannel(psd_info, image_info, image, next_image, GrayQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } else { if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); length = WritePSDChannel(psd_info, image_info, image, next_image, RedQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, GreenQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; length = WritePSDChannel(psd_info, image_info, image, next_image, BlueQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; if (next_image->colorspace == CMYKColorspace) { length = WritePSDChannel(psd_info, image_info, image, next_image, BlackQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != 
MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length = WritePSDChannel(psd_info, image_info, image, next_image, AlphaQuantum, compact_pixels, rows_offset, separate, compression, exception); if (separate != MagickFalse) size_offset += WritePSDSize(psd_info, image, length, size_offset) + 2; else rows_offset += offset_length; count += length; } } compact_pixels = (unsigned char *)RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void)NegateCMYK(next_image, exception); if (separate != MagickFalse) { const char *property; property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels = AcquireCompactPixels(mask, exception); if (compact_pixels == (unsigned char *)NULL) return (0); } length = WritePSDChannel(psd_info, image_info, image, mask, RedQuantum, compact_pixels, rows_offset, MagickTrue, compression, exception); (void)WritePSDSize(psd_info, image, length, size_offset); count += length; compact_pixels = (unsigned char *)RelinquishMagickMemory( compact_pixels); } } } return (count); } static size_t WritePascalString(Image * image, const char *value, size_t padding) { size_t count, length; register ssize_t i; /* * Max length is 255. */ count = 0; length = (strlen(value) > 255UL) ? 255UL : strlen(value); if (length == 0) count += WriteBlobByte(image, 0); else { count += WriteBlobByte(image, (unsigned char)length); count += WriteBlob(image, length, (const unsigned char *)value); } length++; if ((length % padding) == 0) return (count); for (i = 0; i < (ssize_t) (padding - (length % padding)); i++) count += WriteBlobByte(image, 0); return (count); } static void WriteResolutionResourceBlock(Image * image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution = 2.54 * 65536.0 * image->resolution.x + 0.5; y_resolution = 2.54 * 65536.0 * image->resolution.y + 0.5; units = 2; } else { x_resolution = 65536.0 * image->resolution.x + 0.5; y_resolution = 65536.0 * image->resolution.y + 0.5; units = 1; } (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x03ED); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, 16); /* resource size */ (void)WriteBlobMSBLong(image, (unsigned int)(x_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* horizontal resolution unit */ (void)WriteBlobMSBShort(image, units); /* width unit */ (void)WriteBlobMSBLong(image, (unsigned int)(y_resolution + 0.5)); (void)WriteBlobMSBShort(image, units); /* vertical resolution unit */ (void)WriteBlobMSBShort(image, units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo * psd_info, Image * image, const signed short channel) { size_t count; count = (size_t) WriteBlobShort(image, (const unsigned short)channel); count += SetPSDSize(psd_info, image, 0); return (count); } static void RemoveICCProfileFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < 
(datum + length - 16));) { register unsigned char *q; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) break; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); if (id == 0x0000040f) { ssize_t quantum; quantum = PSDQuantum(count) + 12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q + quantum < (datum + length - 16))) (void)memmove(q, q + quantum, length - quantum - (q - datum)); SetStringInfoLength(bim_profile, length - quantum); } break; } p += count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo * bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length = GetStringInfoLength(bim_profile); if (length < 16) return; datum = GetStringInfoDatum(bim_profile); for (p = datum; (p >= datum) && (p < (datum + length - 16));) { register unsigned char *q; ssize_t cnt; q = (unsigned char *)p; if (LocaleNCompare((const char *)p, "8BIM", 4) != 0) return; p = PushLongPixel(MSBEndian, p, &long_sans); p = PushShortPixel(MSBEndian, p, &id); p = PushShortPixel(MSBEndian, p, &short_sans); p = PushLongPixel(MSBEndian, p, &count); cnt = PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length - 12)) && ((ssize_t) length - (cnt + 12) - (q - datum)) > 0) { (void)memmove(q, q + cnt + 12, length - (cnt + 12) - (q - datum)); SetStringInfoLength(bim_profile, length - (cnt + 12)); break; } p += count; if ((count & 0x01) != 0) p++; } } static const StringInfo * GetAdditionalInformation(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* * Whitelist of keys from: * https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo * info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo * profile; unsigned char *p; unsigned int size; info = GetImageProfile(image, "psd:additional-info"); if (info == (const StringInfo *)NULL) return ((const StringInfo *)NULL); option = GetImageOption(image_info, "psd:additional-info"); if (LocaleCompare(option, "all") == 0) return (info); if (LocaleCompare(option, "selective") != 0) { profile = RemoveImageProfile(image, "psd:additional-info"); return (DestroyStringInfo(profile)); } length = GetStringInfoLength(info); p = GetStringInfoDatum(info); remaining_length = length; length = 0; while (remaining_length >= 12) { /* skip over signature */ p += 4; key[0] = (char)(*p++); key[1] = (char)(*p++); key[2] = (char)(*p++); key[3] = (char)(*p++); key[4] = '\0'; size = (unsigned int)(*p++) << 24; size |= (unsigned int)(*p++) << 16; size |= (unsigned int)(*p++) << 8; size |= (unsigned int)(*p++); size = size & 0xffffffff; remaining_length -= 12; if ((size_t) size > remaining_length) return ((const StringInfo *)NULL); found = MagickFalse; for (i = 0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key, allowed[i], PSDKeySize) != 0) continue; found = MagickTrue; break; } 
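/* Keys on the allow list are kept in place; anything else is squeezed out below by shifting the remaining bytes over the rejected entry. */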
remaining_length -= (size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p = (unsigned char *)memmove(p - 12, p + size, remaining_length); continue; } length += (size_t) size + 12; p += size; } profile = RemoveImageProfile(image, "psd:additional-info"); if (length == 0) return (DestroyStringInfo(profile)); SetStringInfoLength(profile, (const size_t)length); (void)SetImageProfile(image, "psd:additional-info", info, exception); return (profile); } static MagickBooleanType WritePSDLayersInternal(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, size_t * layers_size, ExceptionInfo * exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo * info; Image * base_image, *next_image; MagickBooleanType status; MagickOffsetType * layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status = MagickTrue; base_image = GetNextImageInList(image); if (base_image == (Image *) NULL) base_image = image; size = 0; size_offset = TellBlob(image); (void)SetPSDSize(psd_info, image, 0); layer_count = 0; for (next_image = base_image; next_image != NULL;) { layer_count++; next_image = GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size += WriteBlobShort(image, -(unsigned short)layer_count); else size += WriteBlobShort(image, (unsigned short)layer_count); layer_size_offsets = (MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count, sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError, "MemoryAllocationFailed"); layer_index = 0; for (next_image = base_image; next_image != NULL;) { Image * mask; unsigned char default_color; unsigned short channels, total_channels; mask = (Image *) NULL; property = GetImageArtifact(next_image, "psd:opacity-mask"); default_color = 0; if (property != (const char *)NULL) { mask = (Image *) GetImageRegistry(ImageRegistryType, property, exception); default_color = (unsigned char)(strlen(property) == 9 ? 255 : 0); } size += WriteBlobSignedLong(image, (signed int)next_image->page.y); size += WriteBlobSignedLong(image, (signed int)next_image->page.x); size += WriteBlobSignedLong(image, (signed int)(next_image->page.y + next_image->rows)); size += WriteBlobSignedLong(image, (signed int)(next_image->page.x + next_image->columns)); channels = 1; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels = (unsigned short)(next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels = channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size += WriteBlobShort(image, total_channels); layer_size_offsets[layer_index++] = TellBlob(image); for (i = 0; i < (ssize_t) channels; i++) size += WriteChannelSize(psd_info, image, (signed short)i); if (next_image->alpha_trait != UndefinedPixelTrait) size += WriteChannelSize(psd_info, image, -1); if (mask != (Image *) NULL) size += WriteChannelSize(psd_info, image, -2); size += WriteBlobString(image, image->endian == LSBEndian ? 
"MIB8" : "8BIM"); size += WriteBlobString(image, CompositeOperatorToPSDBlendMode(next_image)); property = GetImageArtifact(next_image, "psd:layer.opacity"); if (property != (const char *)NULL) { Quantum opacity; opacity = (Quantum) StringToInteger(property); size += WriteBlobByte(image, ScaleQuantumToChar(opacity)); (void)ApplyPSDLayerOpacity(next_image, opacity, MagickTrue, exception); } else size += WriteBlobByte(image, 255); size += WriteBlobByte(image, 0); size += WriteBlobByte(image, (const unsigned char) (next_image->compose == NoCompositeOp ? 1 << 0x02 : 1)); /* layer properties - * visible, etc. */ size += WriteBlobByte(image, 0); info = GetAdditionalInformation(image_info, next_image, exception); property = (const char *)GetImageProperty(next_image, "label", exception); if (property == (const char *)NULL) { (void)FormatLocaleString(layer_name, MagickPathExtent, "L%.20g", (double)layer_index); property = layer_name; } name_length = strlen(property) + 1; if ((name_length % 4) != 0) name_length += (4 - (name_length % 4)); if (info != (const StringInfo *)NULL) name_length += GetStringInfoLength(info); name_length += 8; if (mask != (Image *) NULL) name_length += 20; size += WriteBlobLong(image, (unsigned int)name_length); if (mask == (Image *) NULL) size += WriteBlobLong(image, 0); else { if (mask->compose != NoCompositeOp) (void)ApplyPSDOpacityMask(next_image, mask, ScaleCharToQuantum( default_color), MagickTrue, exception); mask->page.y += image->page.y; mask->page.x += image->page.x; size += WriteBlobLong(image, 20); size += WriteBlobSignedLong(image, (const signed int)mask->page.y); size += WriteBlobSignedLong(image, (const signed int)mask->page.x); size += WriteBlobSignedLong(image, (const signed int)(mask->rows + mask->page.y)); size += WriteBlobSignedLong(image, (const signed int)(mask->columns + mask->page.x)); size += WriteBlobByte(image, default_color); size += WriteBlobByte(image, (const unsigned char) (mask->compose == NoCompositeOp ? 2 : 0)); size += WriteBlobMSBShort(image, 0); } size += WriteBlobLong(image, 0); size += WritePascalString(image, property, 4); if (info != (const StringInfo *)NULL) size += WriteBlob(image, GetStringInfoLength(info), GetStringInfoDatum(info)); next_image = GetNextImageInList(next_image); } /* * Now the image data! 
*/ next_image = base_image; layer_index = 0; while (next_image != NULL) { length = WritePSDChannels(psd_info, image_info, image, next_image, layer_size_offsets[layer_index++], MagickTrue, exception); if (length == 0) { status = MagickFalse; break; } size += length; next_image = GetNextImageInList(next_image); } /* * Write the total size */ if (layers_size != (size_t *) NULL) *layers_size = size; if ((size / 2) != ((size + 1) / 2)) rounded_size = size + 1; else rounded_size = size; (void)WritePSDSize(psd_info, image, rounded_size, size_offset); layer_size_offsets = (MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* * Remove the opacity mask from the registry */ next_image = base_image; while (next_image != (Image *) NULL) { property = GetImageArtifact(next_image, "psd:opacity-mask"); if (property != (const char *)NULL) (void)DeleteImageRegistry(property); next_image = GetNextImageInList(next_image); } return (status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo * image_info, const PSDInfo * psd_info, ExceptionInfo * exception) { PolicyDomain domain; PolicyRights rights; domain = CoderPolicyDomain; rights = WritePolicyRights; if (IsRightsAuthorized(domain, rights, "PSD") == MagickFalse) return (MagickTrue); return WritePSDLayersInternal(image, image_info, psd_info, (size_t *) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo * image_info, Image * image, ExceptionInfo * exception) { const StringInfo * icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo * bim_profile; /* * Open image file. */ assert(image_info != (const ImageInfo *)NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status = OpenBlob(image_info, image, WriteBinaryBlobMode, exception); if (status == MagickFalse) return (status); packet_size = (size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size += image->depth > 8 ? 2 : 1; psd_info.version = 1; if ((LocaleCompare(image_info->magick, "PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version = 2; (void)WriteBlob(image, 4, (const unsigned char *)"8BPS"); (void)WriteBlobMSBShort(image, psd_info.version); /* version */ for (i = 1; i <= 6; i++) (void)WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image, "icc") == (StringInfo *) NULL) && (SetImageGray(image, exception) != MagickFalse)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void)SetImageStorageClass(image, DirectClass, exception); if (image->colorspace != CMYKColorspace) num_channels = (image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels = (image->alpha_trait != UndefinedPixelTrait ? 
5UL : 4UL); } (void)WriteBlobMSBShort(image, (unsigned short)num_channels); (void)WriteBlobMSBLong(image, (unsigned int)image->rows); (void)WriteBlobMSBLong(image, (unsigned int)image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* * Write depth & mode. */ monochrome = IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void)WriteBlobMSBShort(image, (unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void)WriteBlobMSBShort(image, (unsigned short)(image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void)TransformImageColorspace(image, sRGBColorspace, exception); (void)WriteBlobMSBShort(image, (unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void)TransformImageColorspace(image, CMYKColorspace, exception); (void)WriteBlobMSBShort(image, CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void)WriteBlobMSBLong(image, 0); else { /* * Write PSD raster colormap. */ (void)WriteBlobMSBLong(image, 768); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].red))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].green))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); for (i = 0; i < (ssize_t) image->colors; i++) (void)WriteBlobByte(image, ScaleQuantumToChar(ClampToQuantum( image->colormap[i].blue))); for (; i < 256; i++) (void)WriteBlobByte(image, 0); } /* * Image resource block. 
*/ length = 28; /* 0x03EB */ bim_profile = (StringInfo *) GetImageProfile(image, "8bim"); icc_profile = GetImageProfile(image, "icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile = CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length += PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *)NULL) length += PSDQuantum(GetStringInfoLength(icc_profile)) + 12; (void)WriteBlobMSBLong(image, (unsigned int)length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void)WriteBlob(image, GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile = DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void)WriteBlob(image, 4, (const unsigned char *)"8BIM"); (void)WriteBlobMSBShort(image, 0x0000040F); (void)WriteBlobMSBShort(image, 0); (void)WriteBlobMSBLong(image, (unsigned int)GetStringInfoLength( icc_profile)); (void)WriteBlob(image, GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void)WriteBlobByte(image, 0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset = TellBlob(image); (void)SetPSDSize(&psd_info, image, 0); status = WritePSDLayersInternal(image, image_info, &psd_info, &size, exception); size_offset += WritePSDSize(&psd_info, image, size + (psd_info.version == 1 ? 8 : 12), size_offset); } (void)WriteBlobMSBLong(image, 0); /* user mask data */ /* * Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression = image->compression; if (image_info->compression != UndefinedCompression) image->compression = image_info->compression; if (image->compression == ZipCompression) image->compression = RLECompression; if (WritePSDChannels(&psd_info, image_info, image, image, 0, MagickFalse, exception) == 0) status = MagickFalse; image->compression = compression; } (void)CloseBlob(image); return (status); }
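The layer-name and section-size bookkeeping in WritePSDLayersInternal above hinges on two alignment rules visible in the code: the Pascal-style layer name (length byte plus text) is rounded up to a multiple of 4 bytes, and the finished layer-information section is padded to an even byte count before its size is written back at size_offset. A minimal standalone sketch of that arithmetic, using hypothetical helper names that are not part of the coder:

#include <assert.h>
#include <stddef.h>

static size_t pad_to_even(size_t size)
{
    /* same test as (size / 2) != ((size + 1) / 2) in the writer */
    return size + (size % 2);
}

static size_t padded_pascal_name(size_t text_length)
{
    size_t name_length = text_length + 1;      /* leading length byte */
    if ((name_length % 4) != 0)
        name_length += 4 - (name_length % 4);  /* round up to multiple of 4 */
    return name_length;
}

int main(void)
{
    assert(pad_to_even(7) == 8 && pad_to_even(8) == 8);
    assert(padded_pascal_name(5) == 8);        /* e.g. an "L1"-style label */
    return 0;
}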
GB_binop__iseq_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_int32 // A.*B function (eWiseMult): GB_AemultB__iseq_int32 // A*D function (colscale): GB_AxD__iseq_int32 // D*A function (rowscale): GB_DxB__iseq_int32 // C+=B function (dense accum): GB_Cdense_accumB__iseq_int32 // C+=b function (dense accum): GB_Cdense_accumb__iseq_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int32 // C=scalar+B GB_bind1st__iseq_int32 // C=scalar+B' GB_bind1st_tran__iseq_int32 // C=A+scalar GB_bind2nd__iseq_int32 // C=A'+scalar GB_bind2nd_tran__iseq_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_int32 ( GrB_Matrix C, const GB_void *x_input, const 
GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
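Stripped of the GraphBLAS matrix plumbing (bitmaps, task slices, GrB_Info returns), the kernel generated as GB_bind1st__iseq_int32 above reduces to an elementwise loop applying GB_BINOP with the scalar bound to the first operand; ISEQ returns its boolean result in the operand type, here int32_t. A self-contained sketch with illustrative names:

#include <stdint.h>
#include <stdio.h>

static void bind1st_iseq_int32(int32_t *Cx, int32_t x,
                               const int32_t *Bx, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
        Cx [p] = (x == Bx [p]) ;        /* GB_BINOP: z = (x == y) */
}

int main(void)
{
    int32_t Bx [4] = { 3, 7, 3, 0 }, Cx [4] ;
    bind1st_iseq_int32 (Cx, 3, Bx, 4) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%d ", Cx [p]) ;  /* prints: 1 0 1 0 */
    printf ("\n") ;
    return 0 ;
}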
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_int32 // A.*B function (eWiseMult): GB_AemultB__iseq_int32 // A*D function (colscale): GB_AxD__iseq_int32 // D*A function (rowscale): GB_DxB__iseq_int32 // C+=B function (dense accum): GB_Cdense_accumB__iseq_int32 // C+=b function (dense accum): GB_Cdense_accumb__iseq_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int32 // C=scalar+B GB_bind1st__iseq_int32 // C=scalar+B' GB_bind1st_tran__iseq_int32 // C=A+scalar GB_bind2nd__iseq_int32 // C=A'+scalar GB_bind2nd_tran__iseq_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
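The if (!GBB (Bb, p)) continue guard in the bind1st/bind2nd loops above skips entries that are absent when B is stored in bitmap form; a NULL bitmap means the matrix is full and every position is present. The helper below assumes that reading of the GBB macro (treat the exact definition as an assumption) and shows the guarded apply in isolation:

#include <stdint.h>
#include <stdio.h>

/* assumed behavior of GBB: NULL bitmap means "full", otherwise test Bb [p] */
static int entry_present(const int8_t *Bb, int64_t p)
{
    return (Bb == NULL) ? 1 : (Bb [p] != 0) ;
}

static void apply_iseq_bitmap(int32_t *Cx, const int32_t *Bx,
                              const int8_t *Bb, int32_t x, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (!entry_present (Bb, p)) continue ;  /* skip holes in the bitmap */
        Cx [p] = (x == Bx [p]) ;
    }
}

int main(void)
{
    int32_t Bx [4] = { 3, 3, 9, 3 }, Cx [4] = { -1, -1, -1, -1 } ;
    int8_t  Bb [4] = { 1, 0, 1, 1 } ;           /* entry 1 is absent */
    apply_iseq_bitmap (Cx, Bx, Bb, 3, 4) ;
    printf ("%d %d %d %d\n", Cx [0], Cx [1], Cx [2], Cx [3]) ;  /* 1 -1 0 1 */
    return 0 ;
}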
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_int32 // A.*B function (eWiseMult): GB_AemultB__iseq_int32 // A*D function (colscale): GB_AxD__iseq_int32 // D*A function (rowscale): GB_DxB__iseq_int32 // C+=B function (dense accum): GB_Cdense_accumB__iseq_int32 // C+=b function (dense accum): GB_Cdense_accumb__iseq_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int32 // C=scalar+B GB_bind1st__iseq_int32 // C=scalar+B' GB_bind1st_tran__iseq_int32 // C=A+scalar GB_bind2nd__iseq_int32 // C=A'+scalar GB_bind2nd_tran__iseq_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT 
C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_int32 ( GrB_Matrix C, const GB_void *x_input, const 
GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
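The only difference between the no-OpenMP and OpenMP variants of the bind kernels above is the parallel for pragma: the loop is elementwise with no cross-iteration dependencies, so a static schedule over nthreads partitions it evenly. A runnable sketch of the same pattern on a flat array (the thread count here is illustrative; the library derives it from its own context):

#include <omp.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { ANZ = 1000000 } ;
    static int32_t Ax [ANZ], Cx [ANZ] ;
    int32_t y = 42 ;
    int nthreads = 4 ;
    int64_t p ;
    for (p = 0 ; p < ANZ ; p++) Ax [p] = (int32_t) (p % 100) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < ANZ ; p++)
    {
        Cx [p] = (Ax [p] == y) ;
    }
    printf ("Cx [42] = %d, Cx [43] = %d\n", Cx [42], Cx [43]) ;  /* 1, 0 */
    return 0 ;
}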
backprop.c
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> //#define OPEN #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ {\ register char *_to,*_from;\ register int _i,_l;\ _to = (char *)(to);\ _from = (char *)(from);\ _l = (len);\ for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float) rand() / (float) BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(x) float x; { float m; //x = -x; //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120; //return(1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(n) int n; { float *new; new = (float *) malloc ((unsigned) (n * sizeof (float))); if (new == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new); } /*** Allocate 2d array of floats ***/ float **alloc_2d_dbl(m, n) int m, n; { int i; float **new; new = (float **) malloc ((unsigned) (m * sizeof (float *))); if (new == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { new[i] = alloc_1d_dbl(n); } return (new); } bpnn_randomize_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float) rand()/RAND_MAX; // w[i][j] = dpn1(); } } } bpnn_randomize_row(w, m) float *w; int m; { int i; for (i = 0; i <= m; i++) { //w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } bpnn_zero_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } void bpnn_initialize(seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN *bpnn_internal_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN *newnet; newnet = (BPNN *) malloc (sizeof (BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(net) BPNN *net; { int n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); for (i = 0; i <= n1; i++) { free((char *) net->input_weights[i]); free((char *) net->input_prev_weights[i]); } free((char *) net->input_weights); free((char *) net->input_prev_weights); for (i = 0; i <= n2; i++) { 
free((char *) net->hidden_weights[i]); free((char *) net->hidden_prev_weights[i]); } free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). ***/ BPNN *bpnn_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(l1, l2, conn, n1, n2) float *l1, *l2, **conn; int n1, n2; { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } //extern "C" void bpnn_output_error(delta, target, output, nj, err) float *delta, *target, *output, *err; int nj; { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(delta_h, nh, delta_o, no, who, hidden, err) float *delta_h, *delta_o, *hidden, **who, *err; int nh, no; { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw) float *delta, *ly, **w, **oldw; { float new_dw; int k, j; ly[0] = 1.0; //eta = 0.3; //momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } void bpnn_feedforward(net) BPNN *net; { int in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(net, eo, eh) BPNN *net; float *eo, *eh; { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. 
***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. ***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(net, filename) BPNN *net; char *filename; { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add// FILE *pFile; pFile = fopen( filename, "w+" ); /////// /* if ((fd = creat(filename, 0644)) == -1) { printf("BPNN_SAVE: Cannot create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *) &n1, sizeof(int)); //write(fd, (char *) &n2, sizeof(int)); //write(fd, (char *) &n3, sizeof(int)); fwrite( (char *) &n1 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n2 , sizeof(char), sizeof(char), pFile); fwrite( (char *) &n3 , sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1+1) * (n2+1) * sizeof(float)); fwrite( mem , (unsigned)(sizeof(float)), (unsigned) ((n1+1) * (n2+1) * sizeof(float)) , pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2+1) * (n3+1) * sizeof(float)); fwrite( mem , sizeof(float), (unsigned) ((n2+1) * (n3+1) * sizeof(float)) , pFile); free(mem); fclose(pFile); return; } BPNN *bpnn_read(filename) char *filename; { char *mem; BPNN *new; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } printf("Reading '%s'\n", filename); //fflush(stdout); read(fd, (char *) &n1, sizeof(int)); read(fd, (char *) &n2, sizeof(int)); read(fd, (char *) &n3, sizeof(int)); new = bpnn_internal_create(n1, n2, n3); printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3); printf("Reading input weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float))); read(fd, mem, (n1+1) * (n2+1) * sizeof(float)); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); printf("Done\nReading hidden weights..."); //fflush(stdout); memcnt = 0; mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float))); read(fd, mem, (n2+1) * (n3+1) * sizeof(float)); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); close(fd); printf("Done\n"); //fflush(stdout); bpnn_zero_weights(new->input_prev_weights, n1, n2); bpnn_zero_weights(new->hidden_prev_weights, n2, n3); return (new); }
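For reference, the forward pass in bpnn_layerforward above is the textbook sigmoid layer: unit 0 of the lower layer is the bias ("thresholding") unit pinned to 1.0, and each upper-layer unit squashes the weighted sum of all lower-layer activations. A standalone sketch with illustrative sizes and weights (compile with -lm):

#include <math.h>
#include <stdio.h>

static float squash_ref(float x) { return 1.0f / (1.0f + expf(-x)); }

static void layerforward_ref(float *l1, float *l2, float **conn, int n1, int n2)
{
    int j, k;
    l1[0] = 1.0f;                     /* thresholding (bias) unit */
    for (j = 1; j <= n2; j++) {
        float sum = 0.0f;
        for (k = 0; k <= n1; k++)
            sum += conn[k][j] * l1[k];
        l2[j] = squash_ref(sum);
    }
}

int main(void)
{
    float l1[3] = {0.0f, 0.5f, -0.5f}, l2[2];
    float row0[2] = {0.0f, 0.1f}, row1[2] = {0.0f, 0.2f}, row2[2] = {0.0f, 0.3f};
    float *conn[3] = {row0, row1, row2};
    layerforward_ref(l1, l2, conn, 2, 1);   /* n1 = 2 inputs, n2 = 1 unit */
    printf("l2[1] = %f\n", l2[1]);          /* squash(0.1 + 0.1 - 0.15) */
    return 0;
}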
/* * ***************************************************************** * * HISTORY 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University Prepared * for 15-681, Fall 1994. Modified by Shuai Che ***************************************************************** * */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> // #define OPEN #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ { \ register char *_to, *_from; \ register int _i, _l; \ _to = (char *)(to); \ _from = (char *)(from); \ _l = (len); \ for (_i = 0; _i < _l; _i++) *_to++ = *_from++; \ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float)rand() / (float)BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(x) float x; { float m; //x = -x; //m = 1 + x + x * x / 2 + x * x * x / 6 + x * x * x * x / 24 + x * x * x * x * x / 120; //return (1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float * alloc_1d_dbl(n) int n; { float *new; new = (float *)malloc((unsigned)(n * sizeof(float))); if (new == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new); } /*** Allocate 2d array of floats ***/ float ** alloc_2d_dbl(m, n) int m, n; { int i; float **new; new = (float **)malloc((unsigned)(m * sizeof(float *))); if (new == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { new[i] = alloc_1d_dbl(n); } return (new); } bpnn_randomize_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float)rand() / RAND_MAX; //w[i][j] = dpn1(); } } } bpnn_randomize_row(w, m) float *w; int m; { int i; for (i = 0; i <= m; i++) { //w[i] = (float)rand() / RAND_MAX; w[i] = 0.1; } } bpnn_zero_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } void bpnn_initialize(seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN * bpnn_internal_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN * newnet; newnet = (BPNN *) malloc(sizeof(BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(net) BPNN * net; { int n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *)net->input_units); free((char *)net->hidden_units); free((char *)net->output_units); free((char *)net->hidden_delta); free((char *)net->output_delta); free((char *)net->target); for (i = 0; i <= n1; i++) { free((char *)net->input_weights[i]); free((char *)net->input_prev_weights[i]); } free((char *)net->input_weights); free((char *)net->input_prev_weights); for (i = 0; 
i <= n2; i++) { free((char *)net->hidden_weights[i]); free((char *)net->hidden_prev_weights[i]); } free((char *)net->hidden_weights); free((char *)net->hidden_prev_weights); free((char *)net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). ***/ BPNN * bpnn_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN * newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else /* */ bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif /* */ bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(l1, l2, conn, n1, n2) float *l1, *l2, **conn; int n1, n2; { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #endif /* */ /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } //extern "C" void bpnn_output_error(delta, target, output, nj, err) float *delta, *target, *output, *err; int nj; { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(delta_h, nh, delta_o, no, who, hidden, err) float *delta_h, *delta_o, *hidden, **who, *err; int nh, no; { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw) float *delta, *ly, **w, **oldw; { float new_dw; int k, j; ly[0] = 1.0; //eta = 0.3; //momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #endif /* */ for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } void bpnn_feedforward(net) BPNN * net; { int in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(net, eo, eh) BPNN * net; float *eo, *eh; { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. 
***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. ***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(net, filename) BPNN * net; char *filename; { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add // FILE * pFile; pFile = fopen(filename, "w+"); /////// /* * if ((fd = creat(filename, 0644)) == -1) { printf("BPNN_SAVE: Cannot * create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *)&n1, sizeof(int)); //write(fd, (char *)&n2, sizeof(int)); //write(fd, (char *)&n3, sizeof(int)); fwrite((char *)&n1, sizeof(char), sizeof(char), pFile); fwrite((char *)&n2, sizeof(char), sizeof(char), pFile); fwrite((char *)&n3, sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1 + 1) * (n2 + 1) * sizeof(float)); fwrite(mem, (unsigned)(sizeof(float)), (unsigned)((n1 + 1) * (n2 + 1) * sizeof(float)), pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2 + 1) * (n3 + 1) * sizeof(float)); fwrite(mem, sizeof(float), (unsigned)((n2 + 1) * (n3 + 1) * sizeof(float)), pFile); free(mem); fclose(pFile); return; } BPNN * bpnn_read(filename) char *filename; { char *mem; BPNN * new; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } printf("Reading '%s'\n", filename); //fflush(stdout); read(fd, (char *)&n1, sizeof(int)); read(fd, (char *)&n2, sizeof(int)); read(fd, (char *)&n3, sizeof(int)); new = bpnn_internal_create(n1, n2, n3); printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3); printf("Reading input weights..."); //fflush(stdout); memcnt = 0; mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float))); read(fd, mem, (n1 + 1) * (n2 + 1) * sizeof(float)); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); printf("Done\nReading hidden weights..."); //fflush(stdout); memcnt = 0; mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float))); read(fd, mem, (n2 + 1) * (n3 + 1) * sizeof(float)); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); close(fd); printf("Done\n"); //fflush(stdout); bpnn_zero_weights(new->input_prev_weights, n1, n2); bpnn_zero_weights(new->hidden_prev_weights, n2, n3); return (new); }
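The update rule in bpnn_adjust_weights, shared by all variants of the file, is plain gradient descent with momentum: each weight moves by ETA * delta[j] * ly[k] plus MOMENTUM times the previous step, and the step is stored back into oldw for the next iteration. A minimal sketch, with local constants standing in for the ETA and MOMENTUM macros from backprop.h:

#include <stdio.h>

#define ETA_REF      0.3f
#define MOMENTUM_REF 0.3f

static void adjust_weights_ref(const float *delta, int ndelta,
                               const float *ly, int nly,
                               float **w, float **oldw)
{
    int j, k;
    for (j = 1; j <= ndelta; j++)
        for (k = 0; k <= nly; k++) {
            float new_dw = ETA_REF * delta[j] * ly[k]
                         + MOMENTUM_REF * oldw[k][j];
            w[k][j]   += new_dw;
            oldw[k][j] = new_dw;       /* remembered as the momentum term */
        }
}

int main(void)
{
    float delta[2] = {0.0f, 0.5f}, ly[2] = {1.0f, 0.25f};   /* ly[0] is the bias */
    float w0[2] = {0, 0}, w1[2] = {0, 0}, o0[2] = {0, 0}, o1[2] = {0, 0};
    float *w[2] = {w0, w1}, *oldw[2] = {o0, o1};
    adjust_weights_ref(delta, 1, ly, 1, w, oldw);
    printf("w[0][1] = %f, w[1][1] = %f\n", w[0][1], w[1][1]);  /* 0.15, 0.0375 */
    return 0;
}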
/* * ***************************************************************** * * HISTORY 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University Prepared * for 15-681, Fall 1994. Modified by Shuai Che ***************************************************************** * */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> // #define OPEN #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ { \ register char *_to, *_from; \ register int _i, _l; \ _to = (char *)(to); \ _from = (char *)(from); \ _l = (len); \ for (_i = 0; _i < _l; _i++) *_to++ = *_from++; \ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float)rand() / (float)BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(x) float x; { float m; //x = -x; //m = 1 + x + x * x / 2 + x * x * x / 6 + x * x * x * x / 24 + x * x * x * x * x / 120; //return (1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float * alloc_1d_dbl(n) int n; { float *new; new = (float *)malloc((unsigned)(n * sizeof(float))); if (new == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new); } /*** Allocate 2d array of floats ***/ float ** alloc_2d_dbl(m, n) int m, n; { int i; float **new; new = (float **)malloc((unsigned)(m * sizeof(float *))); if (new == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } for (i = 0; i < m; i++) { new[i] = alloc_1d_dbl(n); } return (new); } bpnn_randomize_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = (float)rand() / RAND_MAX; //w[i][j] = dpn1(); } } } bpnn_randomize_row(w, m) float *w; int m; { int i; for (i = 0; i <= m; i++) { //w[i] = (float)rand() / RAND_MAX; w[i] = 0.1; } } bpnn_zero_weights(w, m, n) float **w; int m, n; { int i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i][j] = 0.0; } } } void bpnn_initialize(seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN * bpnn_internal_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN * newnet; newnet = (BPNN *) malloc(sizeof(BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(net) BPNN * net; { int n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *)net->input_units); free((char *)net->hidden_units); free((char *)net->output_units); free((char *)net->hidden_delta); free((char *)net->output_delta); free((char *)net->target); for (i = 0; i <= n1; i++) { free((char *)net->input_weights[i]); free((char *)net->input_prev_weights[i]); } free((char *)net->input_weights); free((char *)net->input_prev_weights); for (i = 0; 
i <= n2; i++) { free((char *)net->hidden_weights[i]); free((char *)net->hidden_prev_weights[i]); } free((char *)net->hidden_weights); free((char *)net->hidden_prev_weights); free((char *)net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). ***/ BPNN * bpnn_create(n_in, n_hidden, n_out) int n_in, n_hidden, n_out; { BPNN * newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); #ifdef INITZERO bpnn_zero_weights(newnet->input_weights, n_in, n_hidden); #else /* */ bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); #endif /* */ bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(l1, l2, conn, n1, n2) float *l1, *l2, **conn; int n1, n2; { float sum; int j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /* */ /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k][j] * l1[k]; } l2[j] = squash(sum); } } //extern "C" void bpnn_output_error(delta, target, output, nj, err) float *delta, *target, *output, *err; int nj; { int j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(delta_h, nh, delta_o, no, who, hidden, err) float *delta_h, *delta_o, *hidden, **who, *err; int nh, no; { int j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j][k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(delta, ndelta, ly, nly, w, oldw) float *delta, *ly, **w, **oldw; { float new_dw; int k, j; ly[0] = 1.0; //eta = 0.3; //momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif /* */ for (j = 1; j <= ndelta; j++) { for (k = 0; k <= nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j])); w[k][j] += new_dw; oldw[k][j] = new_dw; } } } void bpnn_feedforward(net) BPNN * net; { int in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(net, eo, eh) BPNN * net; float *eo, *eh; { int in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. 
***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. ***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); } void bpnn_save(net, filename) BPNN * net; char *filename; { int n1, n2, n3, i, j, memcnt; float dvalue, **w; char *mem; ///add // FILE * pFile; pFile = fopen(filename, "w+"); /////// /* * if ((fd = creat(filename, 0644)) == -1) { printf("BPNN_SAVE: Cannot * create '%s'\n", filename); return; } */ n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n; printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename); //fflush(stdout); //write(fd, (char *)&n1, sizeof(int)); //write(fd, (char *)&n2, sizeof(int)); //write(fd, (char *)&n3, sizeof(int)); fwrite((char *)&n1, sizeof(char), sizeof(char), pFile); fwrite((char *)&n2, sizeof(char), sizeof(char), pFile); fwrite((char *)&n3, sizeof(char), sizeof(char), pFile); memcnt = 0; w = net->input_weights; mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float))); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n1 + 1) * (n2 + 1) * sizeof(float)); fwrite(mem, (unsigned)(sizeof(float)), (unsigned)((n1 + 1) * (n2 + 1) * sizeof(float)), pFile); free(mem); memcnt = 0; w = net->hidden_weights; mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float))); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { dvalue = w[i][j]; fastcopy(&mem[memcnt], &dvalue, sizeof(float)); memcnt += sizeof(float); } } //write(fd, mem, (n2 + 1) * (n3 + 1) * sizeof(float)); fwrite(mem, sizeof(float), (unsigned)((n2 + 1) * (n3 + 1) * sizeof(float)), pFile); free(mem); fclose(pFile); return; } BPNN * bpnn_read(filename) char *filename; { char *mem; BPNN * new; int fd, n1, n2, n3, i, j, memcnt; if ((fd = open(filename, 0, 0644)) == -1) { return (NULL); } printf("Reading '%s'\n", filename); //fflush(stdout); read(fd, (char *)&n1, sizeof(int)); read(fd, (char *)&n2, sizeof(int)); read(fd, (char *)&n3, sizeof(int)); new = bpnn_internal_create(n1, n2, n3); printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3); printf("Reading input weights..."); //fflush(stdout); memcnt = 0; mem = (char *)malloc((unsigned)((n1 + 1) * (n2 + 1) * sizeof(float))); read(fd, mem, (n1 + 1) * (n2 + 1) * sizeof(float)); for (i = 0; i <= n1; i++) { for (j = 0; j <= n2; j++) { fastcopy(&(new->input_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); printf("Done\nReading hidden weights..."); //fflush(stdout); memcnt = 0; mem = (char *)malloc((unsigned)((n2 + 1) * (n3 + 1) * sizeof(float))); read(fd, mem, (n2 + 1) * (n3 + 1) * sizeof(float)); for (i = 0; i <= n2; i++) { for (j = 0; j <= n3; j++) { fastcopy(&(new->hidden_weights[i][j]), &mem[memcnt], sizeof(float)); memcnt += sizeof(float); } } free(mem); close(fd); printf("Done\n"); //fflush(stdout); bpnn_zero_weights(new->input_prev_weights, n1, n2); bpnn_zero_weights(new->hidden_prev_weights, n2, n3); return (new); }
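The OPEN-guarded pragma on bpnn_layerforward above carries reduction(+: sum) on the j loop even though sum is reset at the top of every iteration, so the combine at the end of the region merges values that are never read again. A minimal sketch, assuming the same (n1+1) x (n2+1) weight layout, that scopes the accumulator inside the loop instead, making it thread-private with no clause at all (layerforward_omp is an illustrative name, and squash() is inlined as the sigmoid, matching its current definition above):

#include <math.h>

/* Layer-forward pass with a per-iteration accumulator: each thread's
 * sum is a fresh local, so neither reduction nor private(sum) is needed. */
static void layerforward_omp(float *l1, float *l2, float **conn,
                             int n1, int n2)
{
    int j, k;
    l1[0] = 1.0f;                          /* threshold/bias unit */
#ifdef _OPENMP
#pragma omp parallel for private(k) schedule(static)
#endif
    for (j = 1; j <= n2; j++) {
        float sum = 0.0f;                  /* per-iteration => per-thread */
        for (k = 0; k <= n1; k++)
            sum += conn[k][j] * l1[k];
        l2[j] = 1.0f / (1.0f + expf(-sum));   /* squash(): sigmoid */
    }
}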
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + 
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
/* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k] - A[(t + 1) % 2][i][j][k] + roc2[i][j][k] * ( coef0 * A[t % 2][i][j][k] + coef1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) + coef2 * (A[t % 2][i - 
2][j][k] + A[t % 2][i + 2][j][k] + A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k] + A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) + coef3 * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k] + A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k] + A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) + coef4 * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k] + A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k] + A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4])); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
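Comparing the two renditions above: the no-OpenMP one drops the #pragma omp parallel wrappers around the LIKWID markers and the _OPENMP query, so num_threads stays at its serial default of 1. For reference, a self-contained sketch of that guard pattern as it appears in the other variants:

#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif

int main(void)
{
    int num_threads = 1;                       /* serial default */
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();       /* only when built with OpenMP */
#endif
    printf("running with %d thread(s)\n", num_threads);
    return 0;
}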
/* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k] - A[(t + 1) % 2][i][j][k] + roc2[i][j][k] * ( coef0 * A[t % 2][i][j][k] + coef1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k] + A[t % 2][i][j - 
1][k] + A[t % 2][i][j + 1][k] + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]) + coef2 * (A[t % 2][i - 2][j][k] + A[t % 2][i + 2][j][k] + A[t % 2][i][j - 2][k] + A[t % 2][i][j + 2][k] + A[t % 2][i][j][k - 2] + A[t % 2][i][j][k + 2]) + coef3 * (A[t % 2][i - 3][j][k] + A[t % 2][i + 3][j][k] + A[t % 2][i][j - 3][k] + A[t % 2][i][j + 3][k] + A[t % 2][i][j][k - 3] + A[t % 2][i][j][k + 3]) + coef4 * (A[t % 2][i - 4][j][k] + A[t % 2][i + 4][j][k] + A[t % 2][i][j - 4][k] + A[t % 2][i][j + 4][k] + A[t % 2][i][j][k - 4] + A[t % 2][i][j][k + 4])); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
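In every rendition above the stencil itself stays serial: the #pragma scop / #pragma endscop markers fence the region off for a later source-to-source tool (the tile_size list is set up for exactly that), and the only OpenMP in the omp variant wraps the LIKWID markers. The space loops of one time step carry no loop-carried dependences, so a direct parallelization is also possible; a minimal sketch, with step25, in, and out as illustrative names for one time step over the A[t%2] / A[(t+1)%2] buffers:

/* One time step of the order-4 (25-point) star stencil above, with the
 * outer two space loops collapsed into a single OpenMP loop. Arrays use
 * the same [z][y][x] row-pointer layout as the benchmark. */
static void step25(double ***out, double ***in, double ***roc2,
                   int Nz, int Ny, int Nx)
{
    const double c0 = -0.28472, c1 = 0.16000, c2 = -0.02000,
                 c3 =  0.00254, c4 = -0.00018;
    int i, j, k;
#pragma omp parallel for collapse(2) private(k) schedule(static)
    for (i = 4; i < Nz - 4; i++)
        for (j = 4; j < Ny - 4; j++)
            for (k = 4; k < Nx - 4; k++)
                out[i][j][k] = 2.0 * in[i][j][k] - out[i][j][k]
                    + roc2[i][j][k] * (c0 * in[i][j][k]
                    + c1 * (in[i-1][j][k] + in[i+1][j][k]
                          + in[i][j-1][k] + in[i][j+1][k]
                          + in[i][j][k-1] + in[i][j][k+1])
                    + c2 * (in[i-2][j][k] + in[i+2][j][k]
                          + in[i][j-2][k] + in[i][j+2][k]
                          + in[i][j][k-2] + in[i][j][k+2])
                    + c3 * (in[i-3][j][k] + in[i+3][j][k]
                          + in[i][j-3][k] + in[i][j+3][k]
                          + in[i][j][k-3] + in[i][j][k+3])
                    + c4 * (in[i-4][j][k] + in[i+4][j][k]
                          + in[i][j-4][k] + in[i][j+4][k]
                          + in[i][j][k-4] + in[i][j][k+4]));
}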
GB_binop__rdiv_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rdiv_fp32 // A.*B function (eWiseMult): GB_AemultB__rdiv_fp32 // A*D function (colscale): GB_AxD__rdiv_fp32 // D*A function (rowscale): GB_DxB__rdiv_fp32 // C+=B function (dense accum): GB_Cdense_accumB__rdiv_fp32 // C+=b function (dense accum): GB_Cdense_accumb__rdiv_fp32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fp32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fp32 // C=scalar+B GB_bind1st__rdiv_fp32 // C=scalar+B' GB_bind1st_tran__rdiv_fp32 // C=A+scalar GB_bind2nd__rdiv_fp32 // C=A'+scalar GB_bind2nd_tran__rdiv_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij / aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (y / x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rdiv_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rdiv_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = (bij / x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rdiv_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = (y / aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij / x) ; \ } GrB_Info GB_bind1st_tran__rdiv_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (y / aij) ; \ } GrB_Info GB_bind2nd_tran__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
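Every kernel in this file compiles to either its specialized body or a bare return (GrB_NO_VALUE), depending on GB_DISABLE, which ORs the GxB_NO_RDIV / GxB_NO_FP32 / GxB_NO_RDIV_FP32 control switches; a caller that receives GrB_NO_VALUE falls back to the generic-operator path. A self-contained sketch of that gating pattern, with the DEMO_* names as hypothetical stand-ins for the GraphBLAS ones:

#include <stdio.h>

/* Control macro: set to 1 to compile the kernel out (mirrors GB_DISABLE
 * above, which ORs several GxB_NO_* switches). */
#define DEMO_DISABLE 0

typedef enum { DEMO_SUCCESS = 0, DEMO_NO_VALUE = 1 } demo_info;

static demo_info rdiv_kernel(float *c, const float *a, float y, int n)
{
#if DEMO_DISABLE
    return DEMO_NO_VALUE;          /* tell the caller: use the generic path */
#else
    int i;
    for (i = 0; i < n; i++)
        c[i] = y / a[i];           /* rdiv: z = y / x */
    return DEMO_SUCCESS;
#endif
}

int main(void)
{
    float a[3] = {1.0f, 2.0f, 4.0f}, c[3];
    if (rdiv_kernel(c, a, 8.0f, 3) == DEMO_SUCCESS)
        printf("%g %g %g\n", c[0], c[1], c[2]);   /* prints: 8 4 2 */
    return 0;
}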
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rdiv_fp32 // A.*B function (eWiseMult): GB_AemultB__rdiv_fp32 // A*D function (colscale): GB_AxD__rdiv_fp32 // D*A function (rowscale): GB_DxB__rdiv_fp32 // C+=B function (dense accum): GB_Cdense_accumB__rdiv_fp32 // C+=b function (dense accum): GB_Cdense_accumb__rdiv_fp32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fp32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fp32 // C=scalar+B GB_bind1st__rdiv_fp32 // C=scalar+B' GB_bind1st_tran__rdiv_fp32 // C=A+scalar GB_bind2nd__rdiv_fp32 // C=A'+scalar GB_bind2nd_tran__rdiv_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij / aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (y / x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rdiv_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rdiv_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = (bij / x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rdiv_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = (y / aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij / x) ; \ } GrB_Info GB_bind1st_tran__rdiv_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (y / aij) ; \ } GrB_Info GB_bind2nd_tran__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__rdiv_fp32 // A.*B function (eWiseMult): GB_AemultB__rdiv_fp32 // A*D function (colscale): GB_AxD__rdiv_fp32 // D*A function (rowscale): GB_DxB__rdiv_fp32 // C+=B function (dense accum): GB_Cdense_accumB__rdiv_fp32 // C+=b function (dense accum): GB_Cdense_accumb__rdiv_fp32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_fp32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_fp32 // C=scalar+B GB_bind1st__rdiv_fp32 // C=scalar+B' GB_bind1st_tran__rdiv_fp32 // C=A+scalar GB_bind2nd__rdiv_fp32 // C=A'+scalar GB_bind2nd_tran__rdiv_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (bij / aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (y / x) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FP32 || GxB_NO_RDIV_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__rdiv_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__rdiv_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = (bij / x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__rdiv_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = (y / aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij / x) ; \ } GrB_Info GB_bind1st_tran__rdiv_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (y / aij) ; \ } GrB_Info GB_bind2nd_tran__rdiv_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
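The two renditions of this file above differ only in the two `#pragma omp parallel for num_threads(nthreads) schedule(static)` lines inside GB_bind1st and GB_bind2nd; everything else, including the nthreads parameter, is identical, so the serial build simply ignores the requested thread count. The core of the bind2nd kernel, reduced to a minimal sketch (rdiv_bind2nd is an illustrative name):

#include <stdint.h>

/* Apply z = y / x with the second operand bound to the scalar y, over a
 * dense float array, using the same clauses as the generated kernel. */
static void rdiv_bind2nd(float *Cx, const float *Ax, float y,
                         int64_t anz, int nthreads)
{
    int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
        Cx[p] = y / Ax[p];         /* rdiv: cij = y / aij */
}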
fw.c
/* Standard implementation of the Floyd-Warshall Algorithm */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "util.h" #include <omp.h> #include <immintrin.h> #include <emmintrin.h> inline int min(int a, int b); int main(int argc, char **argv) { int **A; int i,j,k; struct timeval t1, t2; double time; int N=1024; if (argc != 2) { fprintf(stdout,"Usage: %s N\n", argv[0]); exit(0); } N=atoi(argv[1]); // t=atoi(argv[2]); A = (int **) malloc(N*sizeof(int *)); for(i=0; i<N; i++) A[i] = (int *) malloc(N*sizeof(int)); graph_init_random(A,-1,N,128*N); gettimeofday(&t1,0); __m128i *p; //__m128i *p1; __m128i comp,Aij,Akj,Aik,Ai1k,Ai2k,Ai3k,Mask; for(k=0;k<N;k++) { #pragma omp parallel for private(i) for(i=0; i< N ; i+=4) { Aik=_mm_set1_epi32(A[i][k]); Ai1k=_mm_set1_epi32(A[i+1][k]); Ai2k=_mm_set1_epi32(A[i+2][k]); Ai3k=_mm_set1_epi32(A[i+3][k]); #pragma omp parallel for private(j) for ( j=0 ; j<N ; j+=4) { Akj=_mm_load_si128((__m128i*) &A[k][j]); if(i!=k) { p=(__m128i*)(&A[i][j]); Aij=_mm_load_si128(p); comp =_mm_add_epi32(Aik,Akj); Aij =_mm_min_epi32(Aij,comp); //Mask=_mm_cmplt_epi32(Aij,comp); //Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) ); _mm_store_si128(p,Aij); } p=(__m128i*)(&A[i+1][j]); Aij=_mm_load_si128(p); comp =_mm_add_epi32(Ai1k,Akj); Aij =_mm_min_epi32(Aij,comp); //Mask=_mm_cmplt_epi32(Aij,comp); //Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) ); _mm_store_si128(p,Aij); p=(__m128i*)(&A[i+2][j]); Aij=_mm_load_si128(p); comp =_mm_add_epi32(Ai2k,Akj); Aij =_mm_min_epi32(Aij,comp); //Mask=_mm_cmplt_epi32(Aij,comp); //Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) ); _mm_store_si128(p,Aij); p=(__m128i*)(&A[i+3][j]); Aij=_mm_load_si128(p); comp =_mm_add_epi32(Ai3k,Akj); Aij =_mm_min_epi32(Aij,comp); //Mask=_mm_cmplt_epi32(Aij,comp); //Aij=_mm_or_si128( _mm_and_si128(Mask,Aij), _mm_andnot_si128(Mask,comp) ); _mm_store_si128(p,Aij); } } } /* for(k=K; k<K+N; k++) for(i=I; i<I+N; i++) for(j=J; j<J+N; j++) { A[i][j]=min(A[i][j],A[i][k]+A[k][j]); } */ /* if ( i==k ) continue ; for(j=0; j<N ; j++) A[i][j]=min(A[i][j], A[i][k]+A[k][j]); } }*/ /* for(i=0; i<N; i++) for(j=0; j<N; j++) A[i][j]=min(A[i][j], A[i][k] + A[k][j]); */ gettimeofday(&t2,0); time=(double)((t2.tv_sec-t1.tv_sec)*1000000+t2.tv_usec-t1.tv_usec)/1000000; printf("FW,%d,%.4f\n", N, time); /* for(i=0; i<N; i++) for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]); */ return 0; } inline int min(int a, int b) { if(a<=b)return a; else return b; }
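The commented-out loops at the bottom of main spell out what the intrinsics implement: the classic triple-loop relaxation A[i][j] = min(A[i][j], A[i][k] + A[k][j]). As a minimal scalar reference (fw_scalar is an illustrative name):

/* Scalar Floyd-Warshall over an N x N distance matrix of row pointers,
 * equivalent to the vectorized k-loop body above. */
void fw_scalar(int **A, int N)
{
    int i, j, k;
    for (k = 0; k < N; k++)
        for (i = 0; i < N; i++)
            for (j = 0; j < N; j++)
                if (A[i][k] + A[k][j] < A[i][j])
                    A[i][j] = A[i][k] + A[k][j];
}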
/* * Standard implementation of the Floyd-Warshall Algorithm */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "util.h" #include <omp.h> #include <immintrin.h> #include <emmintrin.h> inline int min(int a, int b); int main(int argc, char **argv) { int **A; int i, j, k; struct timeval t1, t2; double time; int N = 1024; if (argc != 2) { fprintf(stdout, "Usage: %s N\n", argv[0]); exit(0); } N = atoi(argv[1]); //t = atoi(argv[2]); A = (int **)malloc(N * sizeof(int *)); for (i = 0; i < N; i++) A[i] = (int *)malloc(N * sizeof(int)); graph_init_random(A, -1, N, 128 * N); gettimeofday(&t1, 0); __m128i *p; //__m128i * p1; __m128i comp, Aij, Akj, Aik, Ai1k, Ai2k, Ai3k, Mask; for (k = 0; k < N; k++) { for (i = 0; i < N; i += 4) { Aik = _mm_set1_epi32(A[i][k]); Ai1k = _mm_set1_epi32(A[i + 1][k]); Ai2k = _mm_set1_epi32(A[i + 2][k]); Ai3k = _mm_set1_epi32(A[i + 3][k]); for (j = 0; j < N; j += 4) { Akj = _mm_load_si128((__m128i *) & A[k][j]); if (i != k) { p = (__m128i *) (&A[i][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Aik, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); } p = (__m128i *) (&A[i + 1][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai1k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); p = (__m128i *) (&A[i + 2][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai2k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); p = (__m128i *) (&A[i + 3][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai3k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); } } } /* * for(k=K; k<K+N; k++) for(i=I; i<I+N; i++) for(j=J; j<J+N; j++) { * * A[i][j]=min(A[i][j],A[i][k]+A[k][j]); * * } */ /* * if ( i==k ) continue ; * * for(j=0; j<N ; j++) A[i][j]=min(A[i][j], A[i][k]+A[k][j]); * * * } * } */ /* * for(i=0; i<N; i++) for(j=0; j<N; j++) A[i][j]=min(A[i][j], A[i][k] + * A[k][j]); */ gettimeofday(&t2, 0); time = (double)((t2.tv_sec - t1.tv_sec) * 1000000 + t2.tv_usec - t1.tv_usec) / 1000000; printf("FW,%d,%.4f\n", N, time); /* * for(i=0; i<N; i++) for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]); */ return 0; } inline int min(int a, int b) { if (a <= b) return a; else return b; }
/* * Standard implementation of the Floyd-Warshall Algorithm */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "util.h" #include <omp.h> #include <immintrin.h> #include <emmintrin.h> inline int min(int a, int b); int main(int argc, char **argv) { int **A; int i, j, k; struct timeval t1, t2; double time; int N = 1024; if (argc != 2) { fprintf(stdout, "Usage: %s N\n", argv[0]); exit(0); } N = atoi(argv[1]); //t = atoi(argv[2]); A = (int **)malloc(N * sizeof(int *)); for (i = 0; i < N; i++) A[i] = (int *)malloc(N * sizeof(int)); graph_init_random(A, -1, N, 128 * N); gettimeofday(&t1, 0); __m128i *p; //__m128i * p1; __m128i comp, Aij, Akj, Aik, Ai1k, Ai2k, Ai3k, Mask; for (k = 0; k < N; k++) { #pragma omp parallel for private(i) for (i = 0; i < N; i += 4) { Aik = _mm_set1_epi32(A[i][k]); Ai1k = _mm_set1_epi32(A[i + 1][k]); Ai2k = _mm_set1_epi32(A[i + 2][k]); Ai3k = _mm_set1_epi32(A[i + 3][k]); #pragma omp parallel for private(j) for (j = 0; j < N; j += 4) { Akj = _mm_load_si128((__m128i *) & A[k][j]); if (i != k) { p = (__m128i *) (&A[i][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Aik, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); } p = (__m128i *) (&A[i + 1][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai1k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); p = (__m128i *) (&A[i + 2][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai2k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); p = (__m128i *) (&A[i + 3][j]); Aij = _mm_load_si128(p); comp = _mm_add_epi32(Ai3k, Akj); Aij = _mm_min_epi32(Aij, comp); //Mask = _mm_cmplt_epi32(Aij, comp); //Aij = _mm_or_si128(_mm_and_si128(Mask, Aij), _mm_andnot_si128(Mask, comp)); _mm_store_si128(p, Aij); } } } /* * for(k=K; k<K+N; k++) for(i=I; i<I+N; i++) for(j=J; j<J+N; j++) { * * A[i][j]=min(A[i][j],A[i][k]+A[k][j]); * * } */ /* * if ( i==k ) continue ; * * for(j=0; j<N ; j++) A[i][j]=min(A[i][j], A[i][k]+A[k][j]); * * * } * } */ /* * for(i=0; i<N; i++) for(j=0; j<N; j++) A[i][j]=min(A[i][j], A[i][k] + * A[k][j]); */ gettimeofday(&t2, 0); time = (double)((t2.tv_sec - t1.tv_sec) * 1000000 + t2.tv_usec - t1.tv_usec) / 1000000; printf("FW,%d,%.4f\n", N, time); /* * for(i=0; i<N; i++) for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]); */ return 0; } inline int min(int a, int b) { if (a <= b) return a; else return b; }
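Both parallel variants above put `#pragma omp parallel for` on the i loop and again on the j loop. The inner directive opens a nested parallel region; with nested parallelism disabled (the default in most OpenMP runtimes) it is effectively inert, and enabling it would oversubscribe threads. For orientation, a minimal scalar sketch of the same algorithm with a single parallel loop (essentially the fallback left in comments inside the file; compile with -fopenmp):

/* Minimal Floyd-Warshall sketch: for a fixed k, the (i, j) updates are
 * independent, so one parallel-for on i suffices; the k loop carries a
 * dependence and must stay sequential. */
void fw_simple(int **A, int N)
{
    for (int k = 0; k < N; k++) {
        #pragma omp parallel for
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++)
                if (A[i][k] + A[k][j] < A[i][j])
                    A[i][j] = A[i][k] + A[k][j];
    }
}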
GB_unop__atan_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__atan_fp32_fp32 // op(A') function: GB_unop_tran__atan_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = atanf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atanf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = atanf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__atan_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = atanf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__atan_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__atan_fp32_fp32 // op(A') function: GB_unop_tran__atan_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = atanf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atanf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = atanf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__atan_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = atanf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__atan_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__atan_fp32_fp32 // op(A') function: GB_unop_tran__atan_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = atanf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = atanf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = atanf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ATAN || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__atan_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = atanf (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__atan_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
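The three variants above differ only in whether the elementwise apply loop carries the `#pragma omp parallel for` directive. Stripped of the GB_DISABLE guard and cast macros, the generated "apply" pattern reduces to a sketch like this (simplified, not the actual library entry point):

#include <math.h>
#include <stdint.h>

/* Elementwise unary op over a dense array, parallelized with a static
 * schedule exactly as in the generated file above. */
void apply_atan_fp32(float *Cx, const float *Ax, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
        Cx[p] = atanf(Ax[p]);
}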
blackscholes.c
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_OPENMP #include <omp.h> #endif #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. "P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// 
fptype BlkSchlsEqEuroNoDiv( fptype sptprice, fptype strike, fptype rate, fptype volatility, fptype time, int otype, float timet ) { fptype OptionPrice; // local private working variables for the calculation fptype xStockPrice; fptype xStrikePrice; fptype xRiskFreeRate; fptype xVolatility; fptype xTime; fptype xSqrtTime; fptype logValues; fptype xLogTerm; fptype xD1; fptype xD2; fptype xPowerTerm; fptype xDen; fptype d1; fptype d2; fptype FutureValueX; fptype NofXd1; fptype NofXd2; fptype NegNofXd1; fptype NegNofXd2; xStockPrice = sptprice; xStrikePrice = strike; xRiskFreeRate = rate; xVolatility = volatility; xTime = time; xSqrtTime = sqrt(xTime); logValues = log( sptprice / strike ); xLogTerm = logValues; xPowerTerm = xVolatility * xVolatility; xPowerTerm = xPowerTerm * 0.5; xD1 = xRiskFreeRate + xPowerTerm; xD1 = xD1 * xTime; xD1 = xD1 + xLogTerm; xDen = xVolatility * xSqrtTime; xD1 = xD1 / xDen; xD2 = xD1 - xDen; d1 = xD1; d2 = xD2; NofXd1 = CNDF( d1 ); NofXd2 = CNDF( d2 ); FutureValueX = strike * ( exp( -(rate)*(time) ) ); if (otype == 0) { OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2); } else { NegNofXd1 = (1.0 - NofXd1); NegNofXd2 = (1.0 - NofXd2); OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1); } return OptionPrice; } #ifdef ENABLE_TBB struct mainWork { mainWork() {} mainWork(mainWork &w, tbb::split) {} void operator()(const tbb::blocked_range<int> &range) const { fptype price; int begin = range.begin(); int end = range.end(); for (int i=begin; i!=end; i++) { /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK fptype priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-5 ){ fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } }; #endif // ENABLE_TBB ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// #ifdef ENABLE_TBB int bs_thread(void *tid_ptr) { int j; tbb::affinity_partitioner a; mainWork doall; for (j=0; j<NUM_RUNS; j++) { tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a); } return 0; } #else // !ENABLE_TBB #ifdef WIN32 DWORD WINAPI bs_thread(LPVOID tid_ptr){ #else int bs_thread(void *tid_ptr) { #endif int i, j; fptype price; fptype priceDelta; int tid = *(int *)tid_ptr; int start = tid * (numOptions / nThreads); int end = start + (numOptions / nThreads); for (j=0; j<NUM_RUNS; j++) { #ifdef ENABLE_OPENMP #pragma omp parallel for private(i, price, priceDelta) for (i=0; i<numOptions; i++) { #else //ENABLE_OPENMP for (i=start; i<end; i++) { #endif //ENABLE_OPENMP /* Calling main function to calculate option value based on * Black & Scholes's equation. */ price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i], rate[i], volatility[i], otime[i], otype[i], 0); prices[i] = price; #ifdef ERR_CHK priceDelta = data[i].DGrefval - price; if( fabs(priceDelta) >= 1e-4 ){ printf("Error on %d. 
Computed=%.5f, Ref=%.5f, Delta=%.5f\n", i, price, data[i].DGrefval, priceDelta); numError ++; } #endif } } return 0; } #endif //ENABLE_TBB int main (int argc, char **argv) { FILE *file; int i; int loopnum; fptype * buffer; int * buffer2; int rv; #ifdef PARSEC_VERSION #define __PARSEC_STRING(x) #x #define __PARSEC_XSTRING(x) __PARSEC_STRING(x) printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n"); fflush(NULL); #else printf("PARSEC Benchmark Suite\n"); fflush(NULL); #endif //PARSEC_VERSION #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_begin(__parsec_blackscholes); #endif argc = 4; if (argc != 4) { printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]); exit(1); } nThreads = 1; // atoi(argv[1]); char *inputFile = "/BKI.TXT"; // argv[2]; char *outputFile = "/OUT.TXT"; // argv[3]; //Read input data from file file = fopen(inputFile, "r"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", inputFile); exit(1); } rv = fscanf(file, "%i", &numOptions); if(rv != 1) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } if(nThreads > numOptions) { printf("WARNING: Not enough work, reducing number of threads to match number of options.\n"); nThreads = numOptions; } #if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB) if(nThreads != 1) { printf("Error: <nthreads> must be 1 (serial version)\n"); exit(1); } #endif // alloc spaces for the option data data = (OptionData*)malloc(numOptions*sizeof(OptionData)); prices = (fptype*)malloc(numOptions*sizeof(fptype)); for ( loopnum = 0; loopnum < numOptions; ++ loopnum ) { rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval); if(rv != 9) { printf("ERROR: Unable to read from file `%s'.\n", inputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", inputFile); exit(1); } #ifdef ENABLE_THREADS MAIN_INITENV(,8000000,nThreads); #endif printf("Num of Options: %d\n", numOptions); printf("Num of Runs: %d\n", NUM_RUNS); #define PAD 256 #define LINESIZE 64 buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD); sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1)); strike = sptprice + numOptions; rate = strike + numOptions; volatility = rate + numOptions; otime = volatility + numOptions; buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD); otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1)); for (i=0; i<numOptions; i++) { otype[i] = (data[i].OptionType == 'P') ? 
1 : 0; sptprice[i] = data[i].s; strike[i] = data[i].strike; rate[i] = data[i].r; volatility[i] = data[i].v; otime[i] = data[i].t; } printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int))); #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_begin(); #endif #ifdef ENABLE_THREADS #ifdef WIN32 HANDLE *threads; int *nums; threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE)); nums = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { nums[i] = i; threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0); } WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE); free(threads); free(nums); #else int *tids; tids = (int *) malloc (nThreads * sizeof(int)); for(i=0; i<nThreads; i++) { tids[i]=i; CREATE_WITH_ARG(bs_thread, &tids[i]); } WAIT_FOR_END(nThreads); free(tids); #endif //WIN32 #else //ENABLE_THREADS #ifdef ENABLE_OPENMP { int tid=0; omp_set_num_threads(nThreads); bs_thread(&tid); } #else //ENABLE_OPENMP #ifdef ENABLE_TBB tbb::task_scheduler_init init(nThreads); int tid=0; bs_thread(&tid); #else //ENABLE_TBB //serial version int tid=0; bs_thread(&tid); #endif //ENABLE_TBB #endif //ENABLE_OPENMP #endif //ENABLE_THREADS #ifdef ENABLE_PARSEC_HOOKS __parsec_roi_end(); #endif //Write prices to output file file = fopen(outputFile, "w"); if(file == NULL) { printf("ERROR: Unable to open file `%s'.\n", outputFile); exit(1); } rv = fprintf(file, "%i\n", numOptions); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } for(i=0; i<numOptions; i++) { rv = fprintf(file, "%.18f\n", prices[i]); if(rv < 0) { printf("ERROR: Unable to write to file `%s'.\n", outputFile); fclose(file); exit(1); } } rv = fclose(file); if(rv != 0) { printf("ERROR: Unable to close file `%s'.\n", outputFile); exit(1); } #ifdef ERR_CHK printf("Num Errors: %d\n", numError); #endif free(data); free(prices); #ifdef ENABLE_PARSEC_HOOKS __parsec_bench_end(); #endif return 0; }
// Copyright (c) 2007 Intel Corp. // Black-Scholes // Analytical method for calculating European Options // // // Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice // Hall, John C. Hull, #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #ifdef ENABLE_PARSEC_HOOKS #include <hooks.h> #endif // Multi-threaded pthreads header #ifdef ENABLE_THREADS // Add the following line so that icc 9.0 is compatible with pthread lib. #define __thread __threadp MAIN_ENV #undef __thread #endif // Multi-threaded OpenMP header #ifdef ENABLE_TBB #include "tbb/blocked_range.h" #include "tbb/parallel_for.h" #include "tbb/task_scheduler_init.h" #include "tbb/tick_count.h" using namespace std; using namespace tbb; #endif //ENABLE_TBB // Multi-threaded header for Windows #ifdef WIN32 #pragma warning(disable : 4305) #pragma warning(disable : 4244) #include <windows.h> #endif //Precision to use for calculations #define fptype float #define NUM_RUNS 100 typedef struct OptionData_ { fptype s; // spot price fptype strike; // strike price fptype r; // risk-free interest rate fptype divq; // dividend rate fptype v; // volatility fptype t; // time to maturity or option expiration in years // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc) char OptionType; // Option type. "P"=PUT, "C"=CALL fptype divs; // dividend vals (not used in this test) fptype DGrefval; // DerivaGem Reference Value } OptionData; OptionData *data; fptype *prices; int numOptions; int * otype; fptype * sptprice; fptype * strike; fptype * rate; fptype * volatility; fptype * otime; int numError = 0; int nThreads; //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // Cumulative Normal Distribution Function // See Hull, Section 11.8, P.243-244 #define inv_sqrt_2xPI 0.39894228040143270286 fptype CNDF ( fptype InputX ) { int sign; fptype OutputX; fptype xInput; fptype xNPrimeofX; fptype expValues; fptype xK2; fptype xK2_2, xK2_3; fptype xK2_4, xK2_5; fptype xLocal, xLocal_1; fptype xLocal_2, xLocal_3; // Check for negative value of InputX if (InputX < 0.0) { InputX = -InputX; sign = 1; } else sign = 0; xInput = InputX; // Compute NPrimeX term common to both four & six decimal accuracy calcs expValues = exp(-0.5f * InputX * InputX); xNPrimeofX = expValues; xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI; xK2 = 0.2316419 * xInput; xK2 = 1.0 + xK2; xK2 = 1.0 / xK2; xK2_2 = xK2 * xK2; xK2_3 = xK2_2 * xK2; xK2_4 = xK2_3 * xK2; xK2_5 = xK2_4 * xK2; xLocal_1 = xK2 * 0.319381530; xLocal_2 = xK2_2 * (-0.356563782); xLocal_3 = xK2_3 * 1.781477937; xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_4 * (-1.821255978); xLocal_2 = xLocal_2 + xLocal_3; xLocal_3 = xK2_5 * 1.330274429; xLocal_2 = xLocal_2 + xLocal_3; xLocal_1 = xLocal_2 + xLocal_1; xLocal = xLocal_1 * xNPrimeofX; xLocal = 1.0 - xLocal; OutputX = xLocal; if (sign) { OutputX = 1.0 - OutputX; } return OutputX; } ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////// fptype BlkSchlsEqEuroNoDiv( fptype sptprice, 
fptype strike, fptype rate, fptype volatility,
                           fptype time, int otype, float timet)
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log(sptprice / strike);

    xLogTerm = logValues;

    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF(d1);
    NofXd2 = CNDF(d2);

    FutureValueX = strike * (exp(-(rate) * (time)));
    if (otype == 0) {
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

#ifdef ENABLE_TBB
struct mainWork {
    mainWork() {}
    mainWork(mainWork &w, tbb::split) {}

    void operator()(const tbb::blocked_range<int> &range) const {
        fptype price;
        int begin = range.begin();
        int end = range.end();

        for (int i = begin; i != end; i++) {
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation. */
            price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                        volatility[i], otime[i], otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            fptype priceDelta = data[i].DGrefval - price;
            if (fabs(priceDelta) >= 1e-5) {
                fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                        i, price, data[i].DGrefval, priceDelta);
                numError++;
            }
#endif
        }
    }
};
#endif // ENABLE_TBB

#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr)
{
    int j;
    tbb::affinity_partitioner a;

    mainWork doall;
    for (j = 0; j < NUM_RUNS; j++) {
        tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
    }

    return 0;
}
#else // !ENABLE_TBB

#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr)
{
#else
int bs_thread(void *tid_ptr)
{
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j = 0; j < NUM_RUNS; j++) {
        // Serial path: each thread walks its own slice of the options.
        for (i = start; i < end; i++) {
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation. */
            price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                        volatility[i], otime[i], otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if (fabs(priceDelta) >= 1e-4) {
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError++;
            }
#endif
        }
    }

    return 0;
}
#endif // ENABLE_TBB

int main(int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype *buffer;
    int *buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
    printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
    fflush(NULL);
#else
    printf("PARSEC Benchmark Suite\n");
    fflush(NULL);
#endif // PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_begin(__parsec_blackscholes);
#endif

    argc = 4;
    if (argc != 4) {
        printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
        exit(1);
    }
    nThreads = 1;                   // atoi(argv[1]);
    char *inputFile = "/BKI.TXT";   // argv[2];
    char *outputFile = "/OUT.TXT";  // argv[3];

    // Read input data from file
    file = fopen(inputFile, "r");
    if (file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", inputFile);
        exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if (rv != 1) {
        printf("ERROR: Unable to read from file `%s'.\n", inputFile);
        fclose(file);
        exit(1);
    }
    if (nThreads > numOptions) {
        printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
        nThreads = numOptions;
    }
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
    if (nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    // alloc spaces for the option data
    data = (OptionData *)malloc(numOptions * sizeof(OptionData));
    prices = (fptype *)malloc(numOptions * sizeof(fptype));
    for (loopnum = 0; loopnum < numOptions; ++loopnum) {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f",
                    &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r,
                    &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t,
                    &data[loopnum].OptionType, &data[loopnum].divs,
                    &data[loopnum].DGrefval);
        if (rv != 9) {
            printf("ERROR: Unable to read from file `%s'.\n", inputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if (rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", inputFile);
        exit(1);
    }
#ifdef ENABLE_THREADS
    MAIN_INITENV(, 8000000, nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
    otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i = 0; i < numOptions; i++) {
        otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i] = data[i].s;
        strike[i] = data[i].strike;
        rate[i] = data[i].r;
        volatility[i] = data[i].v;
        otime[i] = data[i].t;
    }

    printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
    HANDLE *threads;
    int *nums;
    threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
    nums = (int *)malloc(nThreads * sizeof(int));

    for (i = 0; i < nThreads; i++) {
        nums[i] = i;
        threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
    }
    WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    free(threads);
    free(nums);
#else
    int *tids;
    tids = (int *)malloc(nThreads * sizeof(int));

    for (i = 0; i < nThreads; i++) {
        tids[i] = i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
    free(tids);
#endif // WIN32
#else // ENABLE_THREADS
    // serial version
    int tid = 0;
    bs_thread(&tid);
#endif // ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    // Write prices to output file
    file = fopen(outputFile, "w");
    if (file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", outputFile);
        exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if (rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
    }
    for (i = 0; i < numOptions; i++) {
        rv = fprintf(file, "%.18f\n", prices[i]);
        if (rv < 0) {
            printf("ERROR: Unable to write to file `%s'.\n", outputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if (rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", outputFile);
        exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
// Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition,
// Prentice Hall, John C. Hull,

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif

// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif

// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif

#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif // ENABLE_TBB

// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif

// Precision to use for calculations
#define fptype float

#define NUM_RUNS 100

typedef struct OptionData_ {
    fptype s;           // spot price
    fptype strike;      // strike price
    fptype r;           // risk-free interest rate
    fptype divq;        // dividend rate
    fptype v;           // volatility
    fptype t;           // time to maturity or option expiration in years
                        // (1 yr = 1.0, 6 mos = 0.5, 3 mos = 0.25, ..., etc)
    char OptionType;    // Option type. "P" = PUT, "C" = CALL
    fptype divs;        // dividend vals (not used in this test)
    fptype DGrefval;    // DerivaGem Reference Value
} OptionData;

OptionData *data;
fptype *prices;
int numOptions;

int *otype;
fptype *sptprice;
fptype *strike;
fptype *rate;
fptype *volatility;
fptype *otime;
int numError = 0;
int nThreads;

// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286

fptype CNDF(fptype InputX)
{
    int sign;

    fptype OutputX;
    fptype xInput;
    fptype xNPrimeofX;
    fptype expValues;
    fptype xK2;
    fptype xK2_2, xK2_3;
    fptype xK2_4, xK2_5;
    fptype xLocal, xLocal_1;
    fptype xLocal_2, xLocal_3;

    // Check for negative value of InputX
    if (InputX < 0.0) {
        InputX = -InputX;
        sign = 1;
    } else
        sign = 0;

    xInput = InputX;

    // Compute NPrimeX term common to both four & six decimal accuracy calcs
    expValues = exp(-0.5f * InputX * InputX);
    xNPrimeofX = expValues;
    xNPrimeofX = xNPrimeofX * inv_sqrt_2xPI;

    xK2 = 0.2316419 * xInput;
    xK2 = 1.0 + xK2;
    xK2 = 1.0 / xK2;
    xK2_2 = xK2 * xK2;
    xK2_3 = xK2_2 * xK2;
    xK2_4 = xK2_3 * xK2;
    xK2_5 = xK2_4 * xK2;

    xLocal_1 = xK2 * 0.319381530;
    xLocal_2 = xK2_2 * (-0.356563782);
    xLocal_3 = xK2_3 * 1.781477937;
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_4 * (-1.821255978);
    xLocal_2 = xLocal_2 + xLocal_3;
    xLocal_3 = xK2_5 * 1.330274429;
    xLocal_2 = xLocal_2 + xLocal_3;

    xLocal_1 = xLocal_2 + xLocal_1;
    xLocal = xLocal_1 * xNPrimeofX;
    xLocal = 1.0 - xLocal;

    OutputX = xLocal;

    if (sign) {
        OutputX = 1.0 - OutputX;
    }

    return OutputX;
}

fptype BlkSchlsEqEuroNoDiv(fptype sptprice, fptype strike, fptype rate,
                           fptype volatility, fptype time, int otype,
                           float timet)
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log(sptprice / strike);

    xLogTerm = logValues;

    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF(d1);
    NofXd2 = CNDF(d2);

    FutureValueX = strike * (exp(-(rate) * (time)));
    if (otype == 0) {
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}

#ifdef ENABLE_TBB
struct mainWork {
    mainWork() {}
    mainWork(mainWork &w, tbb::split) {}

    void operator()(const tbb::blocked_range<int> &range) const {
        fptype price;
        int begin = range.begin();
        int end = range.end();

        for (int i = begin; i != end; i++) {
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation. */
            price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                        volatility[i], otime[i], otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            fptype priceDelta = data[i].DGrefval - price;
            if (fabs(priceDelta) >= 1e-5) {
                fprintf(stderr, "Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                        i, price, data[i].DGrefval, priceDelta);
                numError++;
            }
#endif
        }
    }
};
#endif // ENABLE_TBB

#ifdef ENABLE_TBB
int bs_thread(void *tid_ptr)
{
    int j;
    tbb::affinity_partitioner a;

    mainWork doall;
    for (j = 0; j < NUM_RUNS; j++) {
        tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
    }

    return 0;
}
#else // !ENABLE_TBB

#ifdef WIN32
DWORD WINAPI bs_thread(LPVOID tid_ptr)
{
#else
int bs_thread(void *tid_ptr)
{
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    for (j = 0; j < NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i = 0; i < numOptions; i++) {
#else // ENABLE_OPENMP
        for (i = start; i < end; i++) {
#endif // ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation. */
            price = BlkSchlsEqEuroNoDiv(sptprice[i], strike[i], rate[i],
                                        volatility[i], otime[i], otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if (fabs(priceDelta) >= 1e-4) {
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError++;
            }
#endif
        }
    }

    return 0;
}
#endif // ENABLE_TBB

int main(int argc, char **argv)
{
    FILE *file;
    int i;
    int loopnum;
    fptype *buffer;
    int *buffer2;
    int rv;

#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
    printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
    fflush(NULL);
#else
    printf("PARSEC Benchmark Suite\n");
    fflush(NULL);
#endif // PARSEC_VERSION

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_begin(__parsec_blackscholes);
#endif

    argc = 4;
    if (argc != 4) {
        printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
        exit(1);
    }
    nThreads = 1;                   // atoi(argv[1]);
    char *inputFile = "/BKI.TXT";   // argv[2];
    char *outputFile = "/OUT.TXT";  // argv[3];

    // Read input data from file
    file = fopen(inputFile, "r");
    if (file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", inputFile);
        exit(1);
    }
    rv = fscanf(file, "%i", &numOptions);
    if (rv != 1) {
        printf("ERROR: Unable to read from file `%s'.\n", inputFile);
        fclose(file);
        exit(1);
    }
    if (nThreads > numOptions) {
        printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
        nThreads = numOptions;
    }
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
    if (nThreads != 1) {
        printf("Error: <nthreads> must be 1 (serial version)\n");
        exit(1);
    }
#endif

    // alloc spaces for the option data
    data = (OptionData *)malloc(numOptions * sizeof(OptionData));
    prices = (fptype *)malloc(numOptions * sizeof(fptype));
    for (loopnum = 0; loopnum < numOptions; ++loopnum) {
        rv = fscanf(file, "%f %f %f %f %f %f %c %f %f",
                    &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r,
                    &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t,
                    &data[loopnum].OptionType, &data[loopnum].divs,
                    &data[loopnum].DGrefval);
        if (rv != 9) {
            printf("ERROR: Unable to read from file `%s'.\n", inputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if (rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", inputFile);
        exit(1);
    }
#ifdef ENABLE_THREADS
    MAIN_INITENV(, 8000000, nThreads);
#endif
    printf("Num of Options: %d\n", numOptions);
    printf("Num of Runs: %d\n", NUM_RUNS);

#define PAD 256
#define LINESIZE 64

    buffer = (fptype *)malloc(5 * numOptions * sizeof(fptype) + PAD);
    sptprice = (fptype *)(((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
    strike = sptprice + numOptions;
    rate = strike + numOptions;
    volatility = rate + numOptions;
    otime = volatility + numOptions;

    buffer2 = (int *)malloc(numOptions * sizeof(fptype) + PAD);
    otype = (int *)(((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));

    for (i = 0; i < numOptions; i++) {
        otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
        sptprice[i] = data[i].s;
        strike[i] = data[i].strike;
        rate[i] = data[i].r;
        volatility[i] = data[i].v;
        otime[i] = data[i].t;
    }

    printf("Size of data: %d\n", numOptions * (sizeof(OptionData) + sizeof(int)));

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_begin();
#endif

#ifdef ENABLE_THREADS
#ifdef WIN32
    HANDLE *threads;
    int *nums;
    threads = (HANDLE *)malloc(nThreads * sizeof(HANDLE));
    nums = (int *)malloc(nThreads * sizeof(int));

    for (i = 0; i < nThreads; i++) {
        nums[i] = i;
        threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
    }
    WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
    free(threads);
    free(nums);
#else
    int *tids;
    tids = (int *)malloc(nThreads * sizeof(int));

    for (i = 0; i < nThreads; i++) {
        tids[i] = i;
        CREATE_WITH_ARG(bs_thread, &tids[i]);
    }
    WAIT_FOR_END(nThreads);
    free(tids);
#endif // WIN32
#else // ENABLE_THREADS
#ifdef ENABLE_OPENMP
    {
        int tid = 0;
        omp_set_num_threads(nThreads);
        bs_thread(&tid);
    }
#else // ENABLE_OPENMP
#ifdef ENABLE_TBB
    tbb::task_scheduler_init init(nThreads);

    int tid = 0;
    bs_thread(&tid);
#else // ENABLE_TBB
    // serial version
    int tid = 0;
    bs_thread(&tid);
#endif // ENABLE_TBB
#endif // ENABLE_OPENMP
#endif // ENABLE_THREADS

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_roi_end();
#endif

    // Write prices to output file
    file = fopen(outputFile, "w");
    if (file == NULL) {
        printf("ERROR: Unable to open file `%s'.\n", outputFile);
        exit(1);
    }
    rv = fprintf(file, "%i\n", numOptions);
    if (rv < 0) {
        printf("ERROR: Unable to write to file `%s'.\n", outputFile);
        fclose(file);
        exit(1);
    }
    for (i = 0; i < numOptions; i++) {
        rv = fprintf(file, "%.18f\n", prices[i]);
        if (rv < 0) {
            printf("ERROR: Unable to write to file `%s'.\n", outputFile);
            fclose(file);
            exit(1);
        }
    }
    rv = fclose(file);
    if (rv != 0) {
        printf("ERROR: Unable to close file `%s'.\n", outputFile);
        exit(1);
    }

#ifdef ERR_CHK
    printf("Num Errors: %d\n", numError);
#endif
    free(data);
    free(prices);

#ifdef ENABLE_PARSEC_HOOKS
    __parsec_bench_end();
#endif

    return 0;
}
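For reference, BlkSchlsEqEuroNoDiv evaluates the standard Black-Scholes closed form for a European option on a non-dividend-paying asset, with N(.) approximated by CNDF and FutureValueX corresponding to K e^{-rT}:

d_1 = \frac{\ln(S/K) + (r + \sigma^2/2)\,T}{\sigma\sqrt{T}}, \qquad d_2 = d_1 - \sigma\sqrt{T}

C = S\,N(d_1) - K e^{-rT} N(d_2), \qquad P = K e^{-rT} N(-d_2) - S\,N(-d_1)

The code forms N(-d) as 1 - N(d), which is exact for the normal CDF.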
1.c
#include <stdio.h> int main() { #pragma omp parallel { printf(" Hello "); } printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n"); }
#include <stdio.h> int main() { printf(" Hello "); printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n"); }
#include <stdio.h> int main() { #pragma omp parallel { printf(" Hello "); } printf("\n\n GoodBye – Team Destroyed – Exiting Program \n\n"); }
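Built with OpenMP support (e.g. gcc -fopenmp 1.c), the parallel region above prints " Hello " once per thread in the default team, so OMP_NUM_THREADS=4 ./a.out typically emits " Hello " four times (interleaving may vary) before the goodbye line; without -fopenmp the pragma is ignored and it prints once.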
Example_task_dep.5.c
/* * @@name: task_dep.5c * @@type: C * @@compilable: yes * @@linkable: no * @@expect: success * @@version: omp_4.0 */ // Assume BS divides N perfectly void matmul_depend(int N, int BS, float A[N][N], float B[N][N], float C[N][N] ) { int i, j, k, ii, jj, kk; for (i = 0; i < N; i+=BS) { for (j = 0; j < N; j+=BS) { for (k = 0; k < N; k+=BS) { // Note 1: i, j, k, A, B, C are firstprivate by default // Note 2: A, B and C are just pointers #pragma omp task private(ii, jj, kk) \ depend ( in: A[i:BS][k:BS], B[k:BS][j:BS] ) \ depend ( inout: C[i:BS][j:BS] ) for (ii = i; ii < i+BS; ii++ ) for (jj = j; jj < j+BS; jj++ ) for (kk = k; kk < k+BS; kk++ ) C[ii][jj] = C[ii][jj] + A[ii][kk] * B[kk][jj]; } } } }
/*
 * @@name: task_dep.5c @@type: C @@compilable: yes @@linkable: no
 * @@expect: success @@version: omp_4.0
 */
// Assume BS divides N perfectly
void matmul_depend(int N, int BS, float A[N][N], float B[N][N], float C[N][N])
{
    int i, j, k, ii, jj, kk;
    for (i = 0; i < N; i += BS) {
        for (j = 0; j < N; j += BS) {
            for (k = 0; k < N; k += BS) {
                // Note 1: i, j, k, A, B, C are firstprivate by default
                // Note 2: A, B and C are just pointers
                for (ii = i; ii < i + BS; ii++)
                    for (jj = j; jj < j + BS; jj++)
                        for (kk = k; kk < k + BS; kk++)
                            C[ii][jj] = C[ii][jj] + A[ii][kk] * B[kk][jj];
            }
        }
    }
}
/* * @@name: task_dep.5c @@type: C @@compilable: yes @@linkable: no * @@expect: success @@version: omp_4.0 */ // Assume BS divides N perfectly void matmul_depend(int N, int BS, float A[N][N], float B[N][N], float C[N][N]) { int i, j, k, ii, jj, kk; for (i = 0; i < N; i += BS) { for (j = 0; j < N; j += BS) { for (k = 0; k < N; k += BS) { //Note 1:i, j, k, A, B, C are firstprivate by default //Note 2: A, B and C are just pointers #pragma omp task private(ii, jj, kk) \ depend ( in: A[i:BS][k:BS], B[k:BS][j:BS] ) \ depend ( inout: C[i:BS][j:BS] ) for (ii = i; ii < i + BS; ii++) for (jj = j; jj < j + BS; jj++) for (kk = k; kk < k + BS; kk++) C[ii][jj] = C[ii][jj] + A[ii][kk] * B[kk][jj]; } } } }
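The depend(in:)/depend(inout:) clauses above let the runtime order only those tasks that touch the same C block, while tasks on disjoint blocks run concurrently. A minimal standalone sketch of the same mechanism on a scalar (requires OpenMP 4.0, compile with -fopenmp):

#include <stdio.h>

int main(void)
{
    int x = 0;
    #pragma omp parallel
    #pragma omp single
    {
        #pragma omp task depend(inout: x)   // first task on x
        x += 1;
        #pragma omp task depend(inout: x)   // ordered after the first task
        x *= 10;
        #pragma omp taskwait
        printf("%d\n", x);                  // always prints 10
    }
    return 0;
}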
GB_unop__identity_int32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_int32_fp32 // op(A') function: GB_unop_tran__identity_int32_fp32 // C type: int32_t // A type: float // cast: int32_t cij = GB_cast_to_int32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = GB_cast_to_int32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = GB_cast_to_int32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_int32_fp32 ( int32_t *Cx, // Cx and Ax may be aliased const float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; int32_t z = GB_cast_to_int32_t ((double) (aij)) ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_int32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <stdint.h> #include <mpi.h> #include <omp.h> #include "geopm.h" int main(int argc, char **argv) { int err = 0; int index = 0; int rank = 0; int num_iter = 100000000; double sum = 0.0; uint64_t region_id = 0; err = MPI_Init(&argc, &argv); if (!err) { err = geopm_prof_region("loop_0", GEOPM_REGION_HINT_UNKNOWN, &region_id); } MPI_Barrier(MPI_COMM_WORLD); if (!err) { err = geopm_prof_enter(region_id); } if (!err) { (void)geopm_tprof_init(num_iter); for (index = 0; index < num_iter; ++index) { sum += (double)index; (void)geopm_tprof_post(); } err = geopm_prof_exit(region_id); } if (!err) { err = MPI_Comm_rank(MPI_COMM_WORLD, &rank); } if (!err && !rank) { printf("sum = %e\n\n", sum); } int tmp_err = MPI_Finalize(); return err ? err : tmp_err; }
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <stdint.h> #include <mpi.h> #include <omp.h> #include "geopm.h" int main(int argc, char **argv) { int err = 0; int index = 0; int rank = 0; int num_iter = 100000000; double sum = 0.0; uint64_t region_id = 0; err = MPI_Init(&argc, &argv); if (!err) { err = geopm_prof_region("loop_0", GEOPM_REGION_HINT_UNKNOWN, &region_id); } MPI_Barrier(MPI_COMM_WORLD); if (!err) { err = geopm_prof_enter(region_id); } if (!err) { #pragma omp parallel default(shared) private(index) { (void)geopm_tprof_init(num_iter); #pragma omp for reduction(+:sum) for (index = 0; index < num_iter; ++index) { sum += (double)index; (void)geopm_tprof_post(); } } err = geopm_prof_exit(region_id); } if (!err) { err = MPI_Comm_rank(MPI_COMM_WORLD, &rank); } if (!err && !rank) { printf("sum = %e\n\n", sum); } int tmp_err = MPI_Finalize(); return err ? err : tmp_err; }
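Aside from the geopm profiling calls, the parallel structure in the variant above is a standard OpenMP sum reduction. A minimal sketch without the geopm/MPI scaffolding (compile with -fopenmp):

#include <stdio.h>

int main(void)
{
    int num_iter = 100000000;
    double sum = 0.0;
    int index;
    /* Each thread accumulates a private partial sum; OpenMP combines
     * the partials into 'sum' when the loop ends. */
    #pragma omp parallel for reduction(+:sum)
    for (index = 0; index < num_iter; ++index)
        sum += (double)index;
    printf("sum = %e\n", sum);
    return 0;
}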
tree-vectorizer.h
/* Vectorizer Copyright (C) 2003-2019 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to implement: for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* Use a folding reduction within the loop to implement: for (int i = 0; i < VF; ++i) res = res OP val[i]; (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* Structure to encapsulate information about a group of like instructions to be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec<stmt_info_for_cost> stmt_vector_for_cost; /* Maps base addresses to an innermost_loop_behavior that gives the maximum known alignment for that base. */ typedef hash_map<tree_operand_hash, innermost_loop_behavior *> vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* A computation tree of an SLP instance. Each node corresponds to a group of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec<slp_tree> children; /* A group of scalar stmts to be vectorized together. */ vec<stmt_vec_info> stmts; /* Load permutation relative to the stores, NULL if there is no permutation. */ vec<unsigned> load_permutation; /* Vectorized stmt/s. */ vec<stmt_vec_info> vec_stmts; /* Number of vector stmts that are created to replace the group of scalar stmts. 
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  int count;
  enum vect_cost_for_stmt kind;
  enum vect_cost_model_location where;
  stmt_vec_info stmt_info;
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;

/* Maps base addresses to an innermost_loop_behavior that gives the maximum
   known alignment for that base.  */
typedef hash_map<tree_operand_hash,
                 innermost_loop_behavior *> vec_base_alignments;

/************************************************************************
  SLP
 ************************************************************************/
typedef struct _slp_tree *slp_tree;

/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  /* Nodes that contain def-stmts of this node statements operands.  */
  vec<slp_tree> children;
  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  vec<unsigned> load_permutation;
  /* Vectorized stmt/s.  */
  vec<stmt_vec_info> vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts.  It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size.  */
  unsigned int vec_stmts_size;
  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* Whether the scalar computations use two different operators.  */
  bool two_operators;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
};

/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  poly_uint64 unrolling_factor;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;
} *slp_instance;


/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_LOADS(S)                    (S)->loads

#define SLP_TREE_CHILDREN(S)                     (S)->children
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_LOAD_PERMUTATION(S)             (S)->load_permutation
#define SLP_TREE_TWO_OPERATORS(S)                (S)->two_operators
#define SLP_TREE_DEF_TYPE(S)                     (S)->def_type

/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;

/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
struct vec_lower_bound {
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  tree expr;
  bool unsigned_p;
  poly_uint64 min_value;
};

/* Vectorizer state shared between different analyses like vector sizes
   of the same CFG region.  */
struct vec_info_shared {
  vec_info_shared ();
  ~vec_info_shared ();

  void save_datarefs ();
  void check_datarefs ();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};

/* Vectorizer state common between loop and basic-block vectorization.  */
struct vec_info {
  enum vec_kind { bb, loop };

  vec_info (vec_kind, void *, vec_info_shared *);
  ~vec_info ();

  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  struct dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;

  /* All SLP instances.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first stmt in
     the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* Cost data used by the target cost model.  */
  void *target_cost_data;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};

struct _loop_vec_info;
struct _bb_vec_info;

template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}

/* In general, we can divide the vector statements in a vectorized loop
   into related groups ("rgroups") and say that for each rgroup there is
   some nS such that the rgroup operates on nS values from one scalar
   iteration followed by nS values from the next.  That is, if VF is the
   vectorization factor of the loop, the rgroup operates on a sequence:

     (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)

   where (i,j) represents a scalar value with index j in a scalar
   iteration with index i.

   [ We use the term "rgroup" to emphasise that this grouping isn't
     necessarily the same as the grouping of statements used elsewhere.
     For example, if we implement a group of scalar loads using gather
     loads, we'll use a separate gather load for each scalar load, and
     thus each gather load will belong to its own rgroup. ]

   In general this sequence will occupy nV vectors concatenated
   together.  If these vectors have nL lanes each, the total number of
   scalar values N is given by:

       N = nS * VF = nV * nL

   None of nS, VF, nV and nL are required to be a power of 2.  nS and nV
   are compile-time constants but VF and nL can be variable (if the target
   supports variable-length vectors).

   In classical vectorization, each iteration of the vector loop would
   handle exactly VF iterations of the original scalar loop.  However,
   in a fully-masked loop, a particular iteration of the vector loop
   might handle fewer than VF iterations of the scalar loop.  The vector
   lanes that correspond to iterations of the scalar loop are said to be
   "active" and the other lanes are said to be "inactive".

   In a fully-masked loop, many rgroups need to be masked to ensure that
   they have no effect for the inactive lanes.  Each such rgroup needs a
   sequence of booleans in the same order as above, but with each (i,j)
   replaced by a boolean that indicates whether iteration i is active.
   This sequence occupies nV vector masks that again have nL lanes each.
   Thus the mask sequence as a whole consists of VF independent booleans
   that are each repeated nS times.

   We make the simplifying assumption that if a sequence of nV masks is
   suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
   VIEW_CONVERTing it.  This holds for all current targets that support
   fully-masked loops.  For example, suppose the scalar loop is:

     float *f;
     double *d;
     for (int i = 0; i < n; ++i)
       {
         f[i * 2 + 0] += 1.0f;
         f[i * 2 + 1] += 2.0f;
         d[i] += 3.0;
       }

   and suppose that vectors have 256 bits.  The vectorized f accesses
   will belong to one rgroup and the vectorized d access to another:

     f rgroup: nS = 2, nV = 1, nL = 8
     d rgroup: nS = 1, nV = 1, nL = 4
     VF = 4

   [ In this simple example the rgroups do correspond to the normal
     SLP grouping scheme. ]

   If only the first three lanes are active, the masks we need are:

     f rgroup: 1 1 | 1 1 | 1 1 | 0 0
     d rgroup:  1  |  1  |  1  |  0

   Here we can use a mask calculated for f's rgroup for d's, but not
   vice versa.

   Thus for each value of nV, it is enough to provide nV masks, with the
   mask being calculated based on the highest nL (or, equivalently, based
   on the highest nS) required by any rgroup with that nV.  We therefore
   represent the entire collection of masks as a two-level table, with the
   first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
   the second being indexed by the mask index 0 <= i < nV.  */

/* The masks needed by rgroups with nV vectors, according to the
   description above.  */
struct rgroup_masks {
  /* The largest nS for all rgroups that use these masks.  */
  unsigned int max_nscalars_per_iter;

  /* The type of mask to use, based on the highest nS recorded above.  */
  tree mask_type;

  /* A vector of nV masks, in iteration order.  */
  vec<tree> masks;
};

typedef auto_vec<rgroup_masks> vec_loop_masks;
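/* Illustration (an addition, not part of the original header): a standalone
   sketch of the rgroup mask layout described above, guarded with #if 0 and
   meant to be compiled separately.  It reproduces the f/d example (VF = 4,
   f rgroup nS = 2, d rgroup nS = 1) with only the first three scalar
   iterations active: the boolean for lane (i,j) depends only on whether
   scalar iteration i is active, repeated nS times.  */
#if 0
#include <stdio.h>

static void
print_rgroup_mask (const char *name, int vf, int ns, int n_active)
{
  printf ("%s rgroup (nS = %d):", name, ns);
  for (int i = 0; i < vf; ++i)    /* scalar iteration */
    for (int j = 0; j < ns; ++j)  /* value within that iteration */
      printf (" %d", i < n_active);
  printf ("\n");
}

int
main (void)
{
  print_rgroup_mask ("f", 4, 2, 3);  /* 1 1 1 1 1 1 0 0 */
  print_rgroup_mask ("d", 4, 1, 3);  /* 1 1 1 0 */
  return 0;
}
#endif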
/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info : public vec_info {
  _loop_vec_info (struct loop *, vec_info_shared *);
  ~_loop_vec_info ();

  /* The loop to which this info struct refers to.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of latch executions.  */
  tree num_itersm1;
  /* Number of iterations.  */
  tree num_iters;
  /* Number of iterations of the original loop.  */
  tree num_iters_unchanged;
  /* Condition under which this loop is analyzed and versioned.  */
  tree num_iters_assumptions;

  /* Threshold of number of iterations below which vectorization will not be
     performed.  It is calculated from MIN_PROFITABLE_ITERS and
     PARAM_MIN_VECT_LOOP_BOUND.  */
  unsigned int th;

  /* When applying loop versioning, the vector form should only be used
     if the number of scalar iterations is >= this value, on top of all
     the other requirements.  Ignored when loop versioning is not being
     used.  */
  poly_uint64 versioning_threshold;

  /* Unrolling factor  */
  poly_uint64 vectorization_factor;

  /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
     if there is no particular limit.  */
  unsigned HOST_WIDE_INT max_vectorization_factor;

  /* The masks that a fully-masked loop should use to avoid operating
     on inactive scalars.  */
  vec_loop_masks masks;

  /* If we are using a loop mask to align memory addresses, this variable
     contains the number of vector elements that we should skip in the
     first iteration of the vector loop (i.e. the number of leading
     elements that should be false in the first mask).  */
  tree mask_skip_niters;

  /* Type of the variables to use in the WHILE_ULT call for fully-masked
     loops.  */
  tree mask_compare_type;

  /* For #pragma omp simd if (x) loops the x expression.  If constant 0,
     the loop should not be vectorized, if constant non-zero, simd_if_cond
     shouldn't be set and loop vectorized normally, if SSA_NAME, the loop
     should be versioned on that condition, using scalar loop if the condition
     is false and vectorized loop otherwise.  */
  tree simd_if_cond;

  /* Unknown DRs according to which loop was peeled.  */
  struct dr_vec_info *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  auto_vec<ddr_p> may_alias_ddrs;

  /* Data Dependence Relations defining address ranges together with segment
     lengths from which the run-time aliasing check is built.  */
  auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;

  /* Check that the addresses of each pair of objects is unequal.  */
  auto_vec<vec_object_pair> check_unequal_addrs;

  /* List of values that are required to be nonzero.  This is used to check
     whether things like "x[i * n] += 1;" are safe and eventually gets added
     to the checks for lower bounds below.  */
  auto_vec<tree> check_nonzero;

  /* List of values that need to be checked for a minimum value.  */
  auto_vec<vec_lower_bound> lower_bounds;

  /* Statements in the loop that have data references that are candidates
     for a runtime (loop versioning) misalignment check.  */
  auto_vec<stmt_vec_info> may_misalign_stmts;

  /* Reduction cycles detected in the loop.  Used in loop-aware SLP.  */
  auto_vec<stmt_vec_info> reductions;

  /* All reduction chains in the loop, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> reduction_chains;

  /* Cost vector for a single scalar iteration.  */
  auto_vec<stmt_info_for_cost> scalar_cost_vec;

  /* Map of IV base/step expressions to inserted name in the preheader.  */
  hash_map<tree_operand_hash, tree> *ivexpr_map;

  /* The unrolling factor needed to SLP the loop.  In case pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  poly_uint64 slp_unrolling_factor;

  /* Cost of a single scalar iteration.  */
  int single_scalar_iteration_cost;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Records whether we still have the option of using a fully-masked
     loop.  */
  bool can_fully_mask_p;

  /* True if have decided to use a fully-masked loop.  */
  bool fully_masked_p;

  /* When we have grouped data accesses with gaps, we may introduce invalid
     memory accesses.  We peel the last iteration of the loop to prevent
     this.  */
  bool peeling_for_gaps;

  /* When the number of iterations is not a multiple of the vector size
     we need to peel off iterations at the end to form an epilogue loop.  */
  bool peeling_for_niter;

  /* Reductions are canonicalized so that the last operand is the reduction
     operand.  If this places a constant into RHS1, this decanonicalizes
     GIMPLE for other phases, so we must track when this has occurred and
     fix it up.  */
  bool operands_swapped;

  /* True if there are no loop carried data dependencies in the loop.
     If loop->safelen <= 1, then this is always true, either the loop
     didn't have any loop carried data dependencies, or the loop is being
     vectorized guarded with some runtime alias checks, or couldn't
     be vectorized at all, but then this field shouldn't be used.
     For loop->safelen >= 2, the user has asserted that there are no
     backward dependencies, but there still could be loop carried forward
     dependencies in such loops.  This flag will be false if normal
     vectorizer data dependency analysis would fail or require versioning
     for alias, but because of loop->safelen >= 2 it has been vectorized
     even without versioning for alias.  E.g. in:
       #pragma omp simd
       for (int i = 0; i < m; i++)
         a[i] = a[i + k] * c;
     (or #pragma simd or #pragma ivdep) we can vectorize this and it will
     DTRT even for k > 0 && k < m, but without safelen we would not
     vectorize this, so this field would be false.  */
  bool no_data_dependencies;

  /* Mark loops having masked stores.  */
  bool has_mask_store;

  /* If if-conversion versioned this loop before conversion, this is the
     loop version without if-conversion.  */
  struct loop *scalar_loop;

  /* For loops being epilogues of already vectorized loops
     this points to the original vectorized loop.  Otherwise NULL.  */
  _loop_vec_info *orig_loop_info;

} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERSM1(L)             (L)->num_itersm1
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
   prologue peeling retain total unchanged scalar loop iterations for
   cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
#define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_CAN_FULLY_MASK_P(L)     (L)->can_fully_mask_p
#define LOOP_VINFO_FULLY_MASKED_P(L)       (L)->fully_masked_p
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_MAX_VECT_FACTOR(L)      (L)->max_vectorization_factor
#define LOOP_VINFO_MASKS(L)                (L)->masks
#define LOOP_VINFO_MASK_SKIP_NITERS(L)     (L)->mask_skip_niters
#define LOOP_VINFO_MASK_COMPARE_TYPE(L)    (L)->mask_compare_type
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_LOOP_NEST(L)            (L)->shared->loop_nest
#define LOOP_VINFO_DATAREFS(L)             (L)->shared->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->shared->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)      (L)->comp_alias_ddrs
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)  (L)->check_unequal_addrs
#define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
#define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
#define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
#define LOOP_VINFO_REDUCTION_CHAINS(L)     (L)->reduction_chains
#define LOOP_VINFO_TARGET_COST_DATA(L)     (L)->target_cost_data
#define LOOP_VINFO_PEELING_FOR_GAPS(L)     (L)->peeling_for_gaps
#define LOOP_VINFO_OPERANDS_SWAPPED(L)     (L)->operands_swapped
#define LOOP_VINFO_PEELING_FOR_NITER(L)    (L)->peeling_for_niter
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
#define LOOP_VINFO_SCALAR_LOOP(L)          (L)->scalar_loop
#define LOOP_VINFO_HAS_MASK_STORE(L)       (L)->has_mask_store
#define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
#define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost
#define LOOP_VINFO_ORIG_LOOP_INFO(L)       (L)->orig_loop_info
#define LOOP_VINFO_SIMD_IF_COND(L)         (L)->simd_if_cond

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)	\
  ((L)->may_misalign_stmts.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)		\
  ((L)->comp_alias_ddrs.length () > 0			\
   || (L)->check_unequal_addrs.length () > 0		\
   || (L)->lower_bounds.length () > 0)
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)		\
  (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)	\
  (LOOP_VINFO_SIMD_IF_COND (L))
#define LOOP_REQUIRES_VERSIONING(L)			\
  (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L)		\
   || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
  (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)

#define LOOP_VINFO_EPILOGUE_P(L) \
  (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
  (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

/* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
   value signifies success, and a NULL value signifies failure, supporting
   propagating an opt_problem * describing the failure back up the call
   stack.  */
typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;

static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

typedef struct _bb_vec_info : public vec_info
{
  _bb_vec_info (gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *);
  ~_bb_vec_info ();

  basic_block bb;
  gimple_stmt_iterator region_begin;
  gimple_stmt_iterator region_end;
} *bb_vec_info;

#define BB_VINFO_BB(B)               (B)->bb
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
#define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
#define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
#define BB_VINFO_DDRS(B)             (B)->shared->ddrs
#define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,

  /* The def is only used outside the loop.  */
  vect_used_only_live,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. don't get stored to memory, for example). We use this
     to identify computations that we can change the order in which they are
     computed.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular
   loop-based vectorization; pure SLP - the stmt is a part of SLP instances
   and does not have uses outside SLP instances; or hybrid SLP and
   loop-based - the stmt is a part of SLP instance and also must be
   loop-based vectorized, since it has uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different.  By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, cause we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from
   subsequent iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};
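/* Illustration (an addition, not part of the original header): the hybrid
   SLP unrolling arithmetic from the comment above as a standalone sketch,
   guarded with #if 0 and compiled separately.  With 4 elements per vector
   and an SLP group size of 2, one iteration fills only half a vector, so
   the loop is unrolled by 4 / 2 = 2 and each vector statement covers the
   group from two consecutive scalar iterations.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int nunits = 4;                    /* elements per vector */
  int group_size = 2;                /* scalar stmts packed together */
  int unroll = nunits / group_size;  /* iterations per vector stmt */

  /* Where each lane of the first vector statement comes from.  */
  for (int lane = 0; lane < nunits; ++lane)
    printf ("lane %d <- iteration %d, group element %d\n",
            lane, lane / group_size, lane % group_size);
  printf ("unrolling factor = %d\n", unroll);
  return 0;
}
#endif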
/* Says whether a statement is a load, a store of a vectorized statement
   result, or a store of an invariant value.  */
enum vec_load_store_type {
  VLS_LOAD,
  VLS_STORE,
  VLS_STORE_INVARIANT
};

/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be permuted
     after loading or before storing.  Only used for loop vectorization;
     SLP uses separate permutes.  */
  VMAT_CONTIGUOUS_PERMUTE,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores.  */
  VMAT_GATHER_SCATTER
};

struct dr_vec_info {
  /* The data reference itself.  */
  data_reference *dr;
  /* The statement that contains the data reference.  */
  stmt_vec_info stmt;
  /* The misalignment in bytes of the reference, or -1 if not known.  */
  int misalignment;
  /* The byte alignment that we'd ideally like the reference to have,
     and the value that misalignment is measured against.  */
  poly_uint64 target_alignment;
  /* If true the alignment of base_decl needs to be increased.  */
  bool base_misaligned;
  tree base_decl;
};

typedef struct data_reference *dr_p;

struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* Stmt is part of some pattern (computation idiom)  */
  bool in_pattern_p;

  /* True if the statement was created during pattern recognition as
     part of the replacement for RELATED_STMT.  This implies that the
     statement isn't part of any basic block, although for convenience
     its gimple_bb is the same as for RELATED_STMT.  */
  bool pattern_stmt_p;

  /* Is this statement vectorizable or should it be skipped in (partial)
     vectorization.  */
  bool vectorizable;

  /* The stmt to which this info struct refers to.  */
  gimple *stmt;

  /* The vec_info with respect to which STMT is vectorized.  */
  vec_info *vinfo;

  /* The vector type to be used for the LHS of this statement.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  stmt_vec_info vectorized_stmt;

  /* The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access).  A GIMPLE stmt is expected to
     have at most one such data-ref.  */

  dr_vec_info dr_aux;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  innermost_loop_behavior dr_wrt_vec_loop;

  /* For loop PHI nodes, the base and evolution part of it.  This makes sure
     this information is still available in vect_update_ivs_after_vectorizer
     where we may not be able to re-analyze the PHI nodes evolution as
     peeling for the prologue loop can make it unanalyzable.  The evolution
     part is still correct after peeling, but the base may have changed from
     the version here.  */
  tree loop_phi_evolution_base_unchanged;
  tree loop_phi_evolution_part;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  stmt_vec_info related_stmt;

  /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
     The sequence is attached to the original statement rather than the
     pattern statement.  */
  gimple_seq pattern_def_seq;

  /* List of datarefs that are known to have the same alignment as the
     dataref of this stmt.  */
  vec<dr_p> same_align_refs;

  /* Selected SIMD clone's function info.  First vector element
     is SIMD clone's function decl, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  vec<tree> simd_clone_info;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* Interleaving and reduction chains info.  */
  /* First element in the group.  */
  stmt_vec_info first_element;
  /* Pointer to the next element in the group.  */
  stmt_vec_info next_element;
  /* The size of the group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen.  We vectorize the
     last one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load.  For consecutive loads,
     GAP is 1.  */
  unsigned int gap;

  /* The minimum negative dependence distance this stmt participates in
     or zero if none.  */
  unsigned int min_neg_dist;

  /* Not all stmts in the loop need to be vectorized.  e.g, the increment
     of the loop induction variable and computation of array indexes.
     relevant indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* For loads if this is a gather, for stores if this is a scatter.  */
  bool gather_scatter_p;

  /* True if this is an access with loop-invariant stride.  */
  bool strided_p;

  /* For both loads and stores.  */
  bool simd_lane_access_p;

  /* Classifies how the load or store is going to be implemented
     for loop vectorization.  */
  vect_memory_access_type memory_access_type;

  /* For reduction loops, this is the type of reduction.  */
  enum vect_reduction_type v_reduc_type;

  /* For CONST_COND_REDUCTION, record the reduc code.  */
  enum tree_code const_cond_reduc_code;

  /* On a reduction PHI the reduction type as detected by
     vect_force_simple_reduction.  */
  enum vect_reduction_type reduc_type;

  /* On a reduction PHI the def returned by vect_force_simple_reduction.
     On the def returned by vect_force_simple_reduction the
     corresponding PHI.  */
  stmt_vec_info reduc_def;

  /* The number of scalar stmt references from active SLP instances.  */
  unsigned int num_slp_uses;

  /* If nonzero, the lhs of the statement could be truncated to this
     many bits without affecting any users of the result.  */
  unsigned int min_output_precision;

  /* If nonzero, all non-boolean input operands have the same precision,
     and they could each be truncated to this many bits without changing
     the result.  */
  unsigned int min_input_precision;

  /* If OPERATION_BITS is nonzero, the statement could be performed on
     an integer with the sign and number of bits given by OPERATION_SIGN
     and OPERATION_BITS without changing the result.  */
  unsigned int operation_precision;
  signop operation_sign;
};

/* Information about a gather/scatter call.  */
struct gather_scatter_info {
  /* The internal function to use for the gather/scatter operation,
     or IFN_LAST if a built-in function should be used instead.  */
  internal_fn ifn;

  /* The FUNCTION_DECL for the built-in gather/scatter function,
     or null if an internal function should be used instead.  */
  tree decl;

  /* The loop-invariant base value.  */
  tree base;

  /* The original scalar offset, which is a non-loop-invariant SSA_NAME.  */
  tree offset;

  /* Each offset element should be multiplied by this amount before
     being added to the base.  */
  int scale;

  /* The definition type for the vectorized offset.  */
  enum vect_def_type offset_dt;

  /* The type of the vectorized offset.  */
  tree offset_vectype;

  /* The type of the scalar elements after loading or before storing.  */
  tree element_type;

  /* The type of the scalar elements being loaded or stored.  */
  tree memory_type;
};
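/* Illustration (an addition, not part of the original header): the
   addressing scheme captured by gather_scatter_info, as a standalone
   sketch guarded with #if 0 and compiled separately.  Each element is
   read from base + offset[i] * scale, with the base loop-invariant and
   the offsets varying per element.  */
#if 0
#include <stdio.h>

int
main (void)
{
  double data[8] = { 0, 10, 20, 30, 40, 50, 60, 70 };
  char *base = (char *) data;      /* the loop-invariant base value */
  long offset[4] = { 6, 1, 4, 2 }; /* non-invariant scalar offsets */
  int scale = sizeof (double);     /* applied to each offset element */
  double gathered[4];

  for (int i = 0; i < 4; ++i)
    gathered[i] = *(double *) (base + offset[i] * scale);

  for (int i = 0; i < 4; ++i)
    printf ("gathered[%d] = %g\n", i, gathered[i]);  /* 60 10 40 20 */
  return 0;
}
#endif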
/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
inline loop_vec_info
STMT_VINFO_LOOP_VINFO (stmt_vec_info stmt_vinfo)
{
  if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (stmt_vinfo->vinfo))
    return loop_vinfo;
  return NULL;
}
inline bb_vec_info
STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
{
  if (bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (stmt_vinfo->vinfo))
    return bb_vinfo;
  return NULL;
}
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_VECTORIZABLE(S)         (S)->vectorizable
#define STMT_VINFO_DATA_REF(S)             ((S)->dr_aux.dr + 0)
#define STMT_VINFO_GATHER_SCATTER_P(S)     (S)->gather_scatter_p
#define STMT_VINFO_STRIDED_P(S)            (S)->strided_p
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
#define STMT_VINFO_VEC_REDUCTION_TYPE(S)   (S)->v_reduc_type
#define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code

#define STMT_VINFO_DR_WRT_VEC_LOOP(S)      (S)->dr_wrt_vec_loop
#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_wrt_vec_loop.base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_wrt_vec_loop.init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_wrt_vec_loop.offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_wrt_vec_loop.step
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_alignment
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.base_misalignment
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.offset_alignment
#define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
  (S)->dr_wrt_vec_loop.step_alignment

#define STMT_VINFO_DR_INFO(S) \
  (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_PATTERN_DEF_SEQ(S)      (S)->pattern_def_seq
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_GROUPED_ACCESS(S) \
  ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
#define STMT_VINFO_MIN_NEG_DIST(S)         (S)->min_neg_dist
#define STMT_VINFO_NUM_SLP_USES(S)         (S)->num_slp_uses
#define STMT_VINFO_REDUC_TYPE(S)           (S)->reduc_type
#define STMT_VINFO_REDUC_DEF(S)            (S)->reduc_def

#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
#define DR_GROUP_STORE_COUNT(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

#define STMT_VINFO_RELEVANT_P(S)  ((S)->relevant != vect_unused_in_scope)

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

#define VECT_MAX_COST 1000

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

#define MAX_VECTORIZATION_FACTOR INT_MAX

/* Nonzero if TYPE represents a (scalar) boolean type or type
   in the middle-end compatible with it (unsigned precision 1 integral
   types).  Used to determine which types should be vectorized as
   VECTOR_BOOLEAN_TYPE_P.  */
#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
  (TREE_CODE (TYPE) == BOOLEAN_TYPE		\
   || ((TREE_CODE (TYPE) == INTEGER_TYPE	\
	|| TREE_CODE (TYPE) == ENUMERAL_TYPE)	\
       && TYPE_PRECISION (TYPE) == 1		\
       && TYPE_UNSIGNED (TYPE)))

static inline bool
nested_in_vect_loop_p (struct loop *loop, stmt_vec_info stmt_info)
{
  return (loop->inner
          && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
}

/* Return TRUE if a statement represented by STMT_INFO is a part of a
   pattern.  */
static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  return stmt_info->pattern_stmt_p;
}

/* If STMT_INFO is a pattern statement, return the statement that it
   replaces, otherwise return STMT_INFO itself.  */
inline stmt_vec_info
vect_orig_stmt (stmt_vec_info stmt_info)
{
  if (is_pattern_stmt_p (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return the later statement between STMT1_INFO and STMT2_INFO.  */
static inline stmt_vec_info
get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
{
  if (gimple_uid (vect_orig_stmt (stmt1_info)->stmt)
      > gimple_uid (vect_orig_stmt (stmt2_info)->stmt))
    return stmt1_info;
  else
    return stmt2_info;
}

/* If STMT_INFO has been replaced by a pattern statement, return the
   replacement statement, otherwise return STMT_INFO itself.  */
inline stmt_vec_info
vect_stmt_to_vectorize (stmt_vec_info stmt_info)
{
  if (STMT_VINFO_IN_PATTERN_P (stmt_info))
    return STMT_VINFO_RELATED_STMT (stmt_info);
  return stmt_info;
}

/* Return true if BB is a loop header.  */
static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_checking_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

/* Return pow2 (X).  */
static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/* Alias targetm.vectorize.builtin_vectorization_cost.  */
static inline int
builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
                            tree vectype, int misalign)
{
  return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
                                                       vectype, misalign);
}

/* Get cost by calling cost target builtin.  */
static inline int
vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
{
  return builtin_vectorization_cost (type_of_cost, NULL, 0);
}

/* Alias targetm.vectorize.init_cost.  */
static inline void *
init_cost (struct loop *loop_info)
{
  return targetm.vectorize.init_cost (loop_info);
}

extern void dump_stmt_cost (FILE *, void *, int, enum vect_cost_for_stmt,
                            stmt_vec_info, int, unsigned,
                            enum vect_cost_model_location);

/* Alias targetm.vectorize.add_stmt_cost.  */
static inline unsigned
add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
               stmt_vec_info stmt_info, int misalign,
               enum vect_cost_model_location where)
{
  unsigned cost = targetm.vectorize.add_stmt_cost (data, count, kind,
                                                   stmt_info, misalign, where);
  if (dump_file && (dump_flags & TDF_DETAILS))
    dump_stmt_cost (dump_file, data, count, kind, stmt_info, misalign,
                    cost, where);
  return cost;
}

/* Alias targetm.vectorize.finish_cost.  */
static inline void
finish_cost (void *data, unsigned *prologue_cost,
             unsigned *body_cost, unsigned *epilogue_cost)
{
  targetm.vectorize.finish_cost (data, prologue_cost, body_cost,
                                 epilogue_cost);
}

/* Alias targetm.vectorize.destroy_cost_data.  */
static inline void
destroy_cost_data (void *data)
{
  targetm.vectorize.destroy_cost_data (data);
}

inline void
add_stmt_costs (void *data, stmt_vector_for_cost *cost_vec)
{
  stmt_info_for_cost *cost;
  unsigned i;
  FOR_EACH_VEC_ELT (*cost_vec, i, cost)
    add_stmt_cost (data, cost->count, cost->kind, cost->stmt_info,
                   cost->misalign, cost->where);
}

/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/
#define DR_MISALIGNMENT_UNKNOWN (-1)
#define DR_MISALIGNMENT_UNINITIALIZED (-2)

inline void
set_dr_misalignment (dr_vec_info *dr_info, int val)
{
  dr_info->misalignment = val;
}

inline int
dr_misalignment (dr_vec_info *dr_info)
{
  int misalign = dr_info->misalignment;
  gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
  return misalign;
}

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR) dr_misalignment (DR)
#define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)

/* Only defined once DR_MISALIGNMENT is defined.  */
#define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment)

/* Return true if data access DR_INFO is aligned to its target alignment
   (which may be less than a full vector).  */

static inline bool
aligned_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) == 0);
}

/* Return TRUE if the alignment of the data access is known, and FALSE
   otherwise.  */

static inline bool
known_alignment_for_access_p (dr_vec_info *dr_info)
{
  return (DR_MISALIGNMENT (dr_info) != DR_MISALIGNMENT_UNKNOWN);
}

/* Return the minimum alignment in bytes that the vectorized version
   of DR_INFO is guaranteed to have.  */

static inline unsigned int
vect_known_alignment_in_bytes (dr_vec_info *dr_info)
{
  if (DR_MISALIGNMENT (dr_info) == DR_MISALIGNMENT_UNKNOWN)
    return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
  if (DR_MISALIGNMENT (dr_info) == 0)
    return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
  return DR_MISALIGNMENT (dr_info) & -DR_MISALIGNMENT (dr_info);
}
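/* Illustration (an addition, not part of the original header): for a known
   nonzero misalignment, vect_known_alignment_in_bytes returns the lowest
   set bit of the misalignment, misalign & -misalign.  E.g. an access that
   is 12 bytes past a target-aligned boundary is still guaranteed 4-byte
   alignment.  A standalone sketch, guarded with #if 0 and compiled
   separately:  */
#if 0
#include <stdio.h>

int
main (void)
{
  int misalignments[] = { 1, 2, 4, 6, 8, 12 };
  for (int i = 0; i < 6; ++i)
    {
      int m = misalignments[i];
      printf ("misalignment %2d -> guaranteed alignment %d bytes\n",
              m, m & -m);
    }
  return 0;
}
#endif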
/* Return the behavior of DR_INFO with respect to the vectorization
   context (which for outer loop vectorization might not be the behavior
   recorded in DR_INFO itself).  */

static inline innermost_loop_behavior *
vect_dr_behavior (dr_vec_info *dr_info)
{
  stmt_vec_info stmt_info = dr_info->stmt;
  loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
  if (loop_vinfo == NULL
      || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
    return &DR_INNERMOST (dr_info->dr);
  else
    return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
}

/* Return true if the vect cost model is unlimited.  */
static inline bool
unlimited_cost_model (loop_p loop)
{
  if (loop != NULL && loop->force_vectorize
      && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
    return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED;
  return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED);
}

/* Return true if the loop described by LOOP_VINFO is fully-masked and
   if the first iteration should use a partial mask in order to achieve
   alignment.  */

static inline bool
vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
{
  return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
          && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
}

/* Return the number of vectors of type VECTYPE that are needed to get
   NUNITS elements.  NUNITS should be based on the vectorization factor,
   so it is always a known multiple of the number of elements in VECTYPE.  */

static inline unsigned int
vect_get_num_vectors (poly_uint64 nunits, tree vectype)
{
  return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
}

/* Return the number of copies needed for loop vectorization when
   a statement operates on vectors of type VECTYPE.  This is the
   vectorization factor divided by the number of elements in
   VECTYPE and is always known at compile time.  */

static inline unsigned int
vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
{
  return vect_get_num_vectors (LOOP_VINFO_VECT_FACTOR (loop_vinfo), vectype);
}

/* Update maximum unit count *MAX_NUNITS so that it accounts for
   the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
   if we haven't yet recorded any vector types.  */

static inline void
vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
{
  /* All unit counts have the form current_vector_size * X for some
     rational X, so two unit sizes must have a common multiple.
     Everything is a multiple of the initial value of 1.  */
  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
  *max_nunits = force_common_multiple (*max_nunits, nunits);
}

/* Return the vectorization factor that should be used for costing
   purposes while vectorizing the loop described by LOOP_VINFO.
   Pick a reasonable estimate if the vectorization factor isn't
   known at compile time.  */

static inline unsigned int
vect_vf_for_cost (loop_vec_info loop_vinfo)
{
  return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
}

/* Estimate the number of elements in VEC_TYPE for costing purposes.
   Pick a reasonable estimate if the exact number isn't known at
   compile time.  */

static inline unsigned int
vect_nunits_for_cost (tree vec_type)
{
  return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
}

/* Return the maximum possible vectorization factor for LOOP_VINFO.  */

static inline unsigned HOST_WIDE_INT
vect_max_vf (loop_vec_info loop_vinfo)
{
  unsigned HOST_WIDE_INT vf;
  if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
    return vf;
  return MAX_VECTORIZATION_FACTOR;
}

/* Return the size of the value accessed by unvectorized data reference
   DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
   for the associated gimple statement, since that guarantees that DR_INFO
   accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
   here includes things like V1SI, which can be vectorized in the same way
   as a plain SI.)  */

inline unsigned int
vect_get_scalar_dr_size (dr_vec_info *dr_info)
{
  return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
}

/* Source location + hotness information.  */
extern dump_user_location_t vect_location;

/* A macro for calling:
     dump_begin_scope (MSG, vect_location);
   via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
   and then calling
     dump_end_scope ();
   once the object goes out of scope, thus capturing the nesting of
   the scopes.

   These scopes affect dump messages within them: dump messages at the
   top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
   in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.  */

#define DUMP_VECT_SCOPE(MSG) \
  AUTO_DUMP_SCOPE (MSG, vect_location)

/* A sentinel class for ensuring that the "vect_location" global gets
   reset at the end of a scope.

   The "vect_location" global is used during dumping and contains a
   location_t, which could contain references to a tree block via the
   ad-hoc data.  This data is used for tracking inlining information,
   but it's not a GC root; it's simply assumed that such locations never
   get accessed if the blocks are optimized away.

   Hence we need to ensure that such locations are purged at the end
   of any operations using them (e.g. via this class).  */

class auto_purge_vect_location
{
 public:
  ~auto_purge_vect_location ();
};

/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void vect_set_loop_condition (struct loop *, loop_vec_info,
                                     tree, tree, tree, bool);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
struct loop *slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *,
                                                     struct loop *, edge);
struct loop *vect_loop_versioning (loop_vec_info, unsigned int, bool,
                                   poly_uint64);
extern struct loop *vect_do_peeling (loop_vec_info, tree, tree,
                                     tree *, tree *, tree *, int, bool, bool);
extern void vect_prepare_for_masked_peels (loop_vec_info);
extern dump_user_location_t find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern poly_uint64 current_vector_size;
extern tree get_vectype_for_scalar_type (tree);
extern tree get_vectype_for_scalar_type_and_size (tree, poly_uint64);
extern tree get_mask_type_for_scalar_type (tree);
extern tree get_same_sized_vectype (tree, tree);
extern bool vect_get_loop_mask_type (loop_vec_info);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
                                stmt_vec_info * = NULL, gimple ** = NULL);
extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
                                tree *, stmt_vec_info * = NULL,
                                gimple ** = NULL);
extern bool supportable_widening_operation (enum tree_code, stmt_vec_info,
                                            tree, tree, enum tree_code *,
                                            enum tree_code *, int *,
                                            vec<tree> *);
extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
                                             enum tree_code *, int *,
                                             vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
                                  enum vect_cost_for_stmt, stmt_vec_info,
                                  int, enum vect_cost_model_location);
extern stmt_vec_info vect_finish_replace_stmt (stmt_vec_info, gimple *);
extern stmt_vec_info vect_finish_stmt_generation (stmt_vec_info, gimple *,
                                                  gimple_stmt_iterator *);
extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern tree vect_get_store_rhs (stmt_vec_info);
extern tree vect_get_vec_def_for_operand_1 (stmt_vec_info,
                                            enum vect_def_type);
extern tree vect_get_vec_def_for_operand (tree, stmt_vec_info, tree = NULL);
extern void vect_get_vec_defs (tree, tree, stmt_vec_info, vec<tree> *,
                               vec<tree> *, slp_tree);
extern void vect_get_vec_defs_for_stmt_copy (vec_info *,
                                             vec<tree> *, vec<tree> *);
extern tree vect_init_vector (stmt_vec_info, tree, tree,
                              gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (vec_info *, tree);
extern bool vect_transform_stmt (stmt_vec_info, gimple_stmt_iterator *,
                                 slp_tree, slp_instance);
extern void vect_remove_stores (stmt_vec_info);
extern opt_result vect_analyze_stmt (stmt_vec_info, bool *, slp_tree,
                                     slp_instance, stmt_vector_for_cost *);
extern bool vectorizable_condition (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, bool, slp_tree,
                                    stmt_vector_for_cost *);
extern bool vectorizable_shift (stmt_vec_info, gimple_stmt_iterator *,
                                stmt_vec_info *, slp_tree,
                                stmt_vector_for_cost *);
extern void vect_get_load_cost (stmt_vec_info, int, bool,
                                unsigned int *, unsigned int *,
                                stmt_vector_for_cost *,
                                stmt_vector_for_cost *, bool);
extern void vect_get_store_cost (stmt_vec_info, int,
                                 unsigned int *, stmt_vector_for_cost *);
extern bool vect_supportable_shift (enum tree_code, tree);
extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
extern void optimize_mask_stores (struct loop*);
extern gcall *vect_gen_while (tree, tree, tree);
extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
extern opt_result vect_get_vector_types_for_stmt (stmt_vec_info, tree *,
                                                  tree *);
extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
extern enum dr_alignment_support vect_supportable_dr_alignment
                                   (dr_vec_info *, bool);
extern tree vect_get_smallest_scalar_type (stmt_vec_info, HOST_WIDE_INT *,
                                           HOST_WIDE_INT *);
extern opt_result vect_analyze_data_ref_dependences (loop_vec_info,
                                                     unsigned int *);
extern bool vect_slp_analyze_instance_dependence (slp_instance);
extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
extern opt_result vect_verify_datarefs_alignment (loop_vec_info);
extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
extern opt_result vect_analyze_data_ref_accesses (vec_info *);
extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_gather_scatter_fn_p (bool, bool, tree, tree, unsigned int,
                                      signop, int, internal_fn *, tree *);
extern bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info,
                                       gather_scatter_info *);
extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
                                                 vec<data_reference_p> *);
extern opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *);
extern void vect_record_base_alignments (vec_info *);
extern tree vect_create_data_ref_ptr (stmt_vec_info, tree, struct loop *,
                                      tree, tree *, gimple_stmt_iterator *,
                                      gimple **, bool,
                                      tree = NULL_TREE, tree = NULL_TREE);
extern tree bump_vector_ptr (tree, gimple *, gimple_stmt_iterator *,
                             stmt_vec_info, tree);
extern void vect_copy_ref_info (tree, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
extern bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
extern void vect_permute_store_chain (vec<tree>, unsigned int, stmt_vec_info,
                                      gimple_stmt_iterator *, vec<tree> *);
extern tree vect_setup_realignment (stmt_vec_info, gimple_stmt_iterator *,
                                    tree *, enum dr_alignment_support, tree,
                                    struct loop **);
extern void vect_transform_grouped_load (stmt_vec_info, vec<tree>, int,
                                         gimple_stmt_iterator *);
extern void vect_record_grouped_load_vectors (stmt_vec_info, vec<tree>);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
                                   const char * = NULL);
extern tree vect_create_addr_base_for_vector_ref (stmt_vec_info, gimple_seq *,
                                                  tree, tree = NULL_TREE);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern stmt_vec_info vect_force_simple_reduction (loop_vec_info,
                                                  stmt_vec_info, bool *,
                                                  bool);
/* Used in gimple-loop-interchange.c.  */
extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
                                  enum tree_code);
/* Drive for loop analysis stage.  */
extern opt_loop_vec_info vect_analyze_loop (struct loop *,
                                            loop_vec_info,
                                            vec_info_shared *);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
                                         tree *, bool);
extern tree vect_halve_mask_nunits (tree);
extern tree vect_double_mask_nunits (tree);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
                                   unsigned int, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
                                unsigned int, tree, unsigned int);

/* Drive for loop transformation stage.  */
extern struct loop *vect_transform_loop (loop_vec_info);
extern opt_loop_vec_info vect_analyze_loop_form (struct loop *,
                                                 vec_info_shared *);
extern bool vectorizable_live_operation (stmt_vec_info,
                                         gimple_stmt_iterator *,
                                         slp_tree, int, stmt_vec_info *,
                                         stmt_vector_for_cost *);
extern bool vectorizable_reduction (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, slp_tree, slp_instance,
                                    stmt_vector_for_cost *);
extern bool vectorizable_induction (stmt_vec_info, gimple_stmt_iterator *,
                                    stmt_vec_info *, slp_tree,
                                    stmt_vector_for_cost *);
extern tree get_initial_def_for_reduction (stmt_vec_info, tree, tree *);
extern bool vect_worthwhile_without_simd_p (vec_info *, tree_code);
extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
                                        stmt_vector_for_cost *,
                                        stmt_vector_for_cost *,
                                        stmt_vector_for_cost *);
extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);

/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance, bool);
extern bool vect_transform_slp_perm_load (slp_tree, vec<tree>,
                                          gimple_stmt_iterator *, poly_uint64,
                                          slp_instance, bool, unsigned *);
extern bool vect_slp_analyze_operations (vec_info *);
extern void vect_schedule_slp (vec_info *);
extern opt_result vect_analyze_slp (vec_info *, unsigned);
extern bool vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (vec<tree>, slp_tree, vec<vec<tree> > *);
extern bool vect_slp_bb (basic_block);
extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
extern bool can_duplicate_and_interleave_p (unsigned int, machine_mode,
                                            unsigned int * = NULL,
                                            tree * = NULL, tree * = NULL);
extern void duplicate_and_interleave (gimple_seq *, tree, vec<tree>,
                                      unsigned int, vec<tree> &);
extern int vect_get_place_in_interleaving_chain (stmt_vec_info,
                                                 stmt_vec_info);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
void vect_pattern_recog (vec_info *);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
void vect_free_loop_info_assumptions (struct loop *);

#endif  /* GCC_TREE_VECTORIZER_H  */
#ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* * Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to * implement: * * for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* * Use a folding reduction within the loop to implement: * * for (int i = 0; i < VF; ++i) res = res OP val[i]; * * (with no reassocation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* * Structure to encapsulate information about a group of like instructions to * be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec < stmt_info_for_cost > stmt_vector_for_cost; /* * Maps base addresses to an innermost_loop_behavior that gives the maximum * known alignment for that base. */ typedef hash_map < tree_operand_hash, innermost_loop_behavior * >vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* * A computation tree of an SLP instance. Each node corresponds to a group * of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node statements operands. */ vec < slp_tree > children; /* A group of scalar stmts to be vectorized together. */ vec < stmt_vec_info > stmts; /* * Load permutation relative to the stores, NULL if there is no * permutation. */ vec < unsigned >load_permutation; /* Vectorized stmt/s. */ vec < stmt_vec_info > vec_stmts; /* * Number of vector stmts that are created to replace the group of scalar * stmts. It is calculated during the transformation phase as the number * of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by * VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* * SLP instance is a sequence of stmts in a loop that can be packed into SIMD * stmts. */ typedef struct _slp_instance { /* The root of SLP tree. */ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorized this SLP instance. 
*/ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec < slp_tree > loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* * Describes two objects whose addresses must be unequal for the vectorized * loop to be valid. */ typedef std::pair < tree, tree > vec_object_pair; /* * Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. * UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound() { } vec_lower_bound(tree e, bool u, poly_uint64 m) :expr(e), unsigned_p(u), min_value(m) { } tree expr; bool unsigned_p; poly_uint64 min_value; }; /* * Vectorizer state shared between different analyses like vector sizes of * the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. */ vec < data_reference_p > datarefs; vec < data_reference > datarefs_copy; /* The loop nest in which the data dependences are computed. */ auto_vec < loop_p > loop_nest; /* * All data dependences. Freed by free_dependence_relations, so not an * auto_vec. */ vec < ddr_p > ddrs; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum vec_kind { bb, loop }; vec_info(vec_kind, void *, vec_info_shared *); ~vec_info(); stmt_vec_info add_stmt(gimple *); stmt_vec_info lookup_stmt(gimple *); stmt_vec_info lookup_def(tree); stmt_vec_info lookup_single_use(tree); struct dr_vec_info *lookup_dr(data_reference *); void move_dr(stmt_vec_info, stmt_vec_info); void remove_stmt(stmt_vec_info); void replace_stmt(gimple_stmt_iterator *, stmt_vec_info, gimple *); /* The type of vectorization. */ vec_kind kind; /* Shared vectorizer state. */ vec_info_shared *shared; /* The mapping of GIMPLE UID to stmt_vec_info. */ vec < stmt_vec_info > stmt_vec_infos; /* All SLP instances. */ auto_vec < slp_instance > slp_instances; /* * Maps base addresses to an innermost_loop_behavior that gives the * maximum known alignment for that base. */ vec_base_alignments base_alignments; /* * All interleaving chains of stores, represented by the first stmt in * the chain. */ auto_vec < stmt_vec_info > grouped_stores; /* Cost data used by the target cost model. 
*/ void *target_cost_data; private: stmt_vec_info new_stmt_vec_info(gimple * stmt); void set_vinfo_for_stmt(gimple *, stmt_vec_info); void free_stmt_vec_infos(); void free_stmt_vec_info(stmt_vec_info); }; struct _loop_vec_info; struct _bb_vec_info; template <> template <> inline bool is_a_helper < _loop_vec_info * >::test(vec_info * i) { return i->kind == vec_info::loop; } template <> template <> inline bool is_a_helper < _bb_vec_info * >::test(vec_info * i) { return i->kind == vec_info::bb; } /* * In general, we can divide the vector statements in a vectorized loop into * related groups ("rgroups") and say that for each rgroup there is some nS * such that the rgroup operates on nS values from one scalar iteration * followed by nS values from the next. That is, if VF is the vectorization * factor of the loop, the rgroup operates on a sequence: * * (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) * * where (i,j) represents a scalar value with index j in a scalar iteration with * index i. * * [ We use the term "rgroup" to emphasise that this grouping isn't necessarily * the same as the grouping of statements used elsewhere. For example, if we * implement a group of scalar loads using gather loads, we'll use a separate * gather load for each scalar load, and thus each gather load will belong to * its own rgroup. ] * * In general this sequence will occupy nV vectors concatenated together. If * these vectors have nL lanes each, the total number of scalar values N is * given by: * * N = nS * VF = nV * nL * * None of nS, VF, nV and nL are required to be a power of 2. nS and nV are * compile-time constants but VF and nL can be variable (if the target * supports variable-length vectors). * * In classical vectorization, each iteration of the vector loop would handle * exactly VF iterations of the original scalar loop. However, in a * fully-masked loop, a particular iteration of the vector loop might handle * fewer than VF iterations of the scalar loop. The vector lanes that * correspond to iterations of the scalar loop are said to be "active" and * the other lanes are said to be "inactive". * * In a fully-masked loop, many rgroups need to be masked to ensure that they * have no effect for the inactive lanes. Each such rgroup needs a sequence * of booleans in the same order as above, but with each (i,j) replaced by a * boolean that indicates whether iteration i is active. This sequence * occupies nV vector masks that again have nL lanes each. Thus the mask * sequence as a whole consists of VF independent booleans that are each * repeated nS times. * * We make the simplifying assumption that if a sequence of nV masks is suitable * for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by VIEW_CONVERTing * it. This holds for all current targets that support fully-masked loops. * For example, suppose the scalar loop is: * * float *f; double *d; for (int i = 0; i < n; ++i) { f[i * 2 + 0] += 1.0f; f[i * * 2 + 1] += 2.0f; d[i] += 3.0; } * * and suppose that vectors have 256 bits. The vectorized f accesses will * belong to one rgroup and the vectorized d access to another: * * f rgroup: nS = 2, nV = 1, nL = 8 d rgroup: nS = 1, nV = 1, nL = 4 VF = 4 * * [ In this simple example the rgroups do correspond to the normal SLP grouping * scheme. ] * * If only the first three lanes are active, the masks we need are: * * f rgroup: 1 1 | 1 1 | 1 1 | 0 0 d rgroup: 1 | 1 | 1 | 0 * * Here we can use a mask calculated for f's rgroup for d's, but not vice versa. 
* * Thus for each value of nV, it is enough to provide nV masks, with the mask * being calculated based on the highest nL (or, equivalently, based on the * highest nS) required by any rgroup with that nV. We therefore represent * the entire collection of masks as a two-level table, with the first level * being indexed by nV - 1 (since nV == 0 doesn't exist) and the second being * indexed by the mask index 0 <= i < nV. */ /* * The masks needed by rgroups with nV vectors, according to the description * above. */ struct rgroup_masks { /* The largest nS for all rgroups that use these masks. */ unsigned int max_nscalars_per_iter; /* The type of mask to use, based on the highest nS recorded above. */ tree mask_type; /* A vector of nV masks, in iteration order. */ vec < tree > masks; }; typedef auto_vec < rgroup_masks > vec_loop_masks; /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info:public vec_info { _loop_vec_info(struct loop *, vec_info_shared *); ~_loop_vec_info(); /* The loop to which this info struct refers. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Condition under which this loop is analyzed and versioned. */ tree num_iters_assumptions; /* * Threshold of number of iterations below which vectorization will not * be performed. It is calculated from MIN_PROFITABLE_ITERS and * PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* * When applying loop versioning, the vector form should only be used if * the number of scalar iterations is >= this value, on top of all the * other requirements. Ignored when loop versioning is not being used. */ poly_uint64 versioning_threshold; /* Unrolling factor */ poly_uint64 vectorization_factor; /* * Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR if * there is no particular limit. */ unsigned HOST_WIDE_INT max_vectorization_factor; /* * The masks that a fully-masked loop should use to avoid operating on * inactive scalars. */ vec_loop_masks masks; /* * If we are using a loop mask to align memory addresses, this variable * contains the number of vector elements that we should skip in the * first iteration of the vector loop (i.e. the number of leading * elements that should be false in the first mask). */ tree mask_skip_niters; /* * Type of the variables to use in the WHILE_ULT call for fully-masked * loops. */ tree mask_compare_type; /* * If constant zero, the loop should not be vectorized; if constant * non-zero, simd_if_cond shouldn't be set and the loop vectorized * normally; if an SSA_NAME, the loop should be versioned on that * condition, using the scalar loop if the condition is false and the * vectorized loop otherwise. */ tree simd_if_cond; /* Unknown DRs according to which loop was peeled. */ struct dr_vec_info *unaligned_dr; /* * peeling_for_alignment indicates whether peeling for alignment will * take place, and what the peeling factor should be: * peeling_for_alignment = X means: If X=0: Peeling for alignment will * not be applied. If X>0: Peel first X iterations. If X=-1: Generate a * runtime test to calculate the number of iterations to be peeled, using * the dataref recorded in the field unaligned_dr. */ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. 
*/ int ptr_mask; /* * Data Dependence Relations defining address ranges that are candidates * for a run-time aliasing check. */ auto_vec < ddr_p > may_alias_ddrs; /* * Data Dependence Relations defining address ranges together with * segment lengths from which the run-time aliasing check is built. */ auto_vec < dr_with_seg_len_pair_t > comp_alias_ddrs; /* Check that the addresses of each pair of objects are unequal. */ auto_vec < vec_object_pair > check_unequal_addrs; /* * List of values that are required to be nonzero. This is used to check * whether things like "x[i * n] += 1;" are safe and eventually gets * added to the checks for lower bounds below. */ auto_vec < tree > check_nonzero; /* List of values that need to be checked for a minimum value. */ auto_vec < vec_lower_bound > lower_bounds; /* * Statements in the loop that have data references that are candidates * for a runtime (loop versioning) misalignment check. */ auto_vec < stmt_vec_info > may_misalign_stmts; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec < stmt_vec_info > reductions; /* * All reduction chains in the loop, represented by the first stmt in the * chain. */ auto_vec < stmt_vec_info > reduction_chains; /* Cost vector for a single scalar iteration. */ auto_vec < stmt_info_for_cost > scalar_cost_vec; /* Map of IV base/step expressions to inserted name in the preheader. */ hash_map < tree_operand_hash, tree > *ivexpr_map; /* * The unrolling factor needed to SLP the loop. In case that pure SLP * is applied to the loop, i.e., no unrolling is needed, this is 1. */ poly_uint64 slp_unrolling_factor; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* Is the loop vectorizable? */ bool vectorizable; /* Records whether we still have the option of using a fully-masked loop. */ bool can_fully_mask_p; /* True if we have decided to use a fully-masked loop. */ bool fully_masked_p; /* * When we have grouped data accesses with gaps, we may introduce invalid * memory accesses. We peel the last iteration of the loop to prevent * this. */ bool peeling_for_gaps; /* * When the number of iterations is not a multiple of the vector size we * need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* * Reductions are canonicalized so that the last operand is the reduction * operand. If this places a constant into RHS1, this decanonicalizes * GIMPLE for other phases, so we must track when this has occurred and * fix it up. */ bool operands_swapped; /* * True if there are no loop carried data dependencies in the loop. If * loop->safelen <= 1, then this is always true, either the loop didn't * have any loop carried data dependencies, or the loop is being * vectorized guarded with some runtime alias checks, or couldn't be * vectorized at all, but then this field shouldn't be used. For * loop->safelen >= 2, the user has asserted that there are no backward * dependencies, but there still could be loop carried forward * dependencies in such loops. This flag will be false if normal * vectorizer data dependency analysis would fail or require versioning * for alias, but because of loop->safelen >= 2 it has been vectorized * even without versioning for alias. E.g. in: for (int i = 0; i < m; * i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma ivdep) we can * vectorize this and it will DTRT even for k > 0 && k < m, but without * safelen we would not vectorize this, so this field would be false. */ bool no_data_dependencies; /* Mark loops having masked stores. 
*/ bool has_mask_store; /* * If if-conversion versioned this loop before conversion, this is the * loop version without if-conversion. */ struct loop *scalar_loop; /* * For loops being epilogues of already vectorized loops this points to * the original vectorized loop. Otherwise NULL. */ _loop_vec_info *orig_loop_info; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* * Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue * peeling retain total unchanged scalar loop iterations for cost model. */ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor #define LOOP_VINFO_MASKS(L) (L)->masks #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->comp_alias_ddrs.length () > 0 \ || (L)->check_unequal_addrs.length () > 0 \ || (L)->lower_bounds.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ (LOOP_VINFO_SIMD_IF_COND (L)) #define 
LOOP_REQUIRES_VERSIONING(L) \ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) #define LOOP_VINFO_EPILOGUE_P(L) \ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) /* * Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL * value signifies success, and a NULL value signifies failure, supporting * propagating an opt_problem * describing the failure back up the call * stack. */ typedef opt_pointer_wrapper < loop_vec_info > opt_loop_vec_info; static inline loop_vec_info loop_vec_info_for_loop(struct loop *loop) { return (loop_vec_info) loop->aux; } typedef struct _bb_vec_info:public vec_info { _bb_vec_info(gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); ~_bb_vec_info(); basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs #define BB_VINFO_DDRS(B) (B)->shared->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb(basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* * Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is only used outside the loop. */ vect_used_only_live, /* * The def is in the inner loop, and the use is in the outer loop, and * the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* * The def is in the inner loop, and the use is in the outer loop (and is * not part of reduction). */ vect_used_in_outer, /* * defs that feed computations that end up (only) in a reduction. These * defs may be used by non-reduction stmts, but eventually, any * computations/values that are affected by these defs are used to * compute a reduction (i.e. don't get stored to memory, for example). We * use this to identify computations that we can change the order in * which they are computed. */ vect_used_by_reduction, vect_used_in_scope }; /* * The type of vectorization that can be applied to the stmt: regular * loop-based vectorization; pure SLP - the stmt is a part of SLP instances * and does not have uses outside SLP instances; or hybrid SLP and loop-based * - the stmt is a part of SLP instance and also must be loop-based * vectorized, since it has uses outside SLP sequences. * * In the loop context the meanings of pure and hybrid SLP are slightly * different. 
By saying that pure SLP is applied to the loop, we mean that we * exploit only intra-iteration parallelism in the loop; i.e., the loop can * be vectorized without doing any conceptual unrolling, because we don't pack * together stmts from different iterations, only within a single iteration. * Loop hybrid SLP means that we exploit both intra-iteration and * inter-iteration parallelism (e.g., number of elements in the vector is 4 * and the slp-group-size is 2, in which case we don't have enough * parallelism within an iteration, so we obtain the rest of the parallelism * from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* * Says whether a statement is a load, a store of a vectorized statement * result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* * Describes how we're going to vectorize an individual load or store, or a * group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* * A contiguous access that goes down in memory rather than up, with no * additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* * A simple contiguous access in which the elements need to be permuted * after loading or before storing. Only used for loop vectorization; * SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* * A simple contiguous access in which the elements need to be reversed * after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* * An access in which each scalar element is loaded or stored * individually. */ VMAT_ELEMENTWISE, /* * A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP * accesses. Each unrolled iteration uses a contiguous load or store for * the whole group, but the groups from separate iterations are combined * in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; struct dr_vec_info { /* The data reference itself. */ data_reference *dr; /* The statement that contains the data reference. */ stmt_vec_info stmt; /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* * The byte alignment that we'd ideally like the reference to have, and * the value that misalignment is measured against. */ poly_uint64 target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; typedef struct data_reference *dr_p; struct _stmt_vec_info { enum stmt_vec_info_type type; /* * Indicates whether this stmt is part of a computation whose result is * used outside the loop. */ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* * True if the statement was created during pattern recognition as part * of the replacement for RELATED_STMT. This implies that the statement * isn't part of any basic block, although for convenience its gimple_bb * is the same as for RELATED_STMT. */ bool pattern_stmt_p; /* * Is this statement vectorizable or should it be skipped in (partial) * vectorization. */ bool vectorizable; /* The stmt to which this info struct refers. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. 
*/ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ stmt_vec_info vectorized_stmt; /* * The following is relevant only for stmts that contain a non-scalar * data-ref (array/pointer/struct access). A GIMPLE stmt is expected to * have at most one such data-ref. */ dr_vec_info dr_aux; /* * Information about the data-ref relative to this loop nest (the loop * that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* * For loop PHI nodes, the base and evolution part of it. This makes * sure this information is still available in * vect_update_ivs_after_vectorizer where we may not be able to * re-analyze the PHI nodes evolution as peeling for the prologue loop * can make it unanalyzable. The evolution part is still correct after * peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* * Used for various bookkeeping purposes, generally holding a pointer to * some other stmt S that is in some way "related" to this stmt. Current * use of this field is: If this stmt is part of a pattern (i.e. the * field 'in_pattern_p' is true): S is the "pattern stmt" that represents * (and replaces) the sequence of stmts that constitutes the pattern. * Similarly, the related_stmt of the "pattern stmt" points back to this * stmt (which is the last stmt in the original sequence of stmts that * constitutes the pattern). */ stmt_vec_info related_stmt; /* * Used to keep a sequence of def stmts of a pattern stmt if such exists. * The sequence is attached to the original statement rather than the * pattern statement. */ gimple_seq pattern_def_seq; /* * List of datarefs that are known to have the same alignment as the * dataref of this stmt. */ vec < dr_p > same_align_refs; /* * Selected SIMD clone's function info. First vector element is SIMD * clone's function decl, followed by a pair of trees (base + step) for * linear arguments (pair of NULLs for other arguments). */ vec < tree > simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ stmt_vec_info next_element; /* The size of the group. */ unsigned int size; /* * For stores, number of stores from this group seen. We vectorize the * last one. */ unsigned int store_count; /* * For loads only, the gap from the previous load. For consecutive loads, * GAP is 1. */ unsigned int gap; /* * The minimum negative dependence distance this stmt participates in or * zero if none. */ unsigned int min_neg_dist; /* * Not all stmts in the loop need to be vectorized. e.g, the increment of * the loop induction variable and computation of array indexes. relevant * indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* * Classifies how the load or store is going to be implemented for loop * vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. 
*/ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* * On a reduction PHI the reduction type as detected by * vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* * On a reduction PHI the def returned by vect_force_simple_reduction. On * the def returned by vect_force_simple_reduction the corresponding PHI. */ stmt_vec_info reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; /* * If nonzero, the lhs of the statement could be truncated to this many * bits without affecting any users of the result. */ unsigned int min_output_precision; /* * If nonzero, all non-boolean input operands have the same precision, * and they could each be truncated to this many bits without changing * the result. */ unsigned int min_input_precision; /* * If OPERATION_BITS is nonzero, the statement could be performed on an * integer with the sign and number of bits given by OPERATION_SIGN and * OPERATION_BITS without changing the result. */ unsigned int operation_precision; signop operation_sign; }; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* * The internal function to use for the gather/scatter operation, or * IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* * The FUNCTION_DECL for the built-in gather/scatter function, or null if * an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* * Each offset element should be multiplied by this amount before being * added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. */ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. 
*/ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO(stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast < loop_vec_info > (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO(stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast < bb_vec_info > (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0) #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_DR_INFO(S) \ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux) #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUPED_ACCESS(S) \ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S)) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define DR_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element) #define DR_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element) #define DR_GROUP_SIZE(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size) #define DR_GROUP_STORE_COUNT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count) #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) #define REDUC_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) #define REDUC_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) #define REDUC_GROUP_SIZE(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type #define VECT_MAX_COST 1000 /* * The 
maximum number of intermediate steps required in multi-step type * conversion. */ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* * Nonzero if TYPE represents a (scalar) boolean type or type in the * middle-end compatible with it (unsigned precision 1 integral types). Used * to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) static inline bool nested_in_vect_loop_p(struct loop *loop, stmt_vec_info stmt_info) { return (loop->inner && (loop->inner == (gimple_bb(stmt_info->stmt))->loop_father)); } /* * Return TRUE if a statement represented by STMT_INFO is a part of a * pattern. */ static inline bool is_pattern_stmt_p(stmt_vec_info stmt_info) { return stmt_info->pattern_stmt_p; } /* * If STMT_INFO is a pattern statement, return the statement that it * replaces, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_orig_stmt(stmt_vec_info stmt_info) { if (is_pattern_stmt_p(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return the later statement between STMT1_INFO and STMT2_INFO. */ static inline stmt_vec_info get_later_stmt(stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { if (gimple_uid(vect_orig_stmt(stmt1_info)->stmt) > gimple_uid(vect_orig_stmt(stmt2_info)->stmt)) return stmt1_info; else return stmt2_info; } /* * If STMT_INFO has been replaced by a pattern statement, return the * replacement statement, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_stmt_to_vectorize(stmt_vec_info stmt_info) { if (STMT_VINFO_IN_PATTERN_P(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p(basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert(EDGE_COUNT(bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2(int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost(enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost(type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost(enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost(type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost(struct loop *loop_info) { return targetm.vectorize.init_cost(loop_info); } extern void dump_stmt_cost(FILE *, void *, int, enum vect_cost_for_stmt, stmt_vec_info, int, unsigned, enum vect_cost_model_location); /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost(void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { unsigned cost = targetm.vectorize.add_stmt_cost(data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost(dump_file, data, count, kind, stmt_info, misalign, cost, where); return cost; } /* Alias targetm.vectorize.finish_cost. 
*/ static inline void finish_cost(void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost(data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data(void *data) { targetm.vectorize.destroy_cost_data(data); } inline void add_stmt_costs(void *data, stmt_vector_for_cost * cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT(*cost_vec, i, cost) add_stmt_cost(data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ #define DR_MISALIGNMENT_UNKNOWN (-1) #define DR_MISALIGNMENT_UNINITIALIZED (-2) inline void set_dr_misalignment(dr_vec_info * dr_info, int val) { dr_info->misalignment = val; } inline int dr_misalignment(dr_vec_info * dr_info) { int misalign = dr_info->misalignment; gcc_assert(misalign != DR_MISALIGNMENT_UNINITIALIZED); return misalign; } /* * Reflects actual alignment of first access in the vectorized loop, taking * into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment) /* * Return true if data access DR_INFO is aligned to its target alignment * (which may be less than a full vector). */ static inline bool aligned_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) == 0); } /* * Return TRUE if the alignment of the data access is known, and FALSE * otherwise. */ static inline bool known_alignment_for_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) != DR_MISALIGNMENT_UNKNOWN); } /* * Return the minimum alignment in bytes that the vectorized version of * DR_INFO is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes(dr_vec_info * dr_info) { if (DR_MISALIGNMENT(dr_info) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT(TREE_TYPE(DR_REF(dr_info->dr))); if (DR_MISALIGNMENT(dr_info) == 0) return known_alignment(DR_TARGET_ALIGNMENT(dr_info)); return DR_MISALIGNMENT(dr_info) & -DR_MISALIGNMENT(dr_info); } /* * Return the behavior of DR_INFO with respect to the vectorization context * (which for outer loop vectorization might not be the behavior recorded in * DR_INFO itself). */ static inline innermost_loop_behavior * vect_dr_behavior(dr_vec_info * dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO(stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p(LOOP_VINFO_LOOP(loop_vinfo), stmt_info)) return &DR_INNERMOST(dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP(stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model(loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* * Return true if the loop described by LOOP_VINFO is fully-masked and if the * first iteration should use a partial mask in order to achieve alignment. 
*/ static inline bool vect_use_loop_mask_for_alignment_p(loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P(loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT(loop_vinfo)); } /* * Return the number of vectors of type VECTYPE that are needed to get NUNITS * elements. NUNITS should be based on the vectorization factor, so it is * always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors(poly_uint64 nunits, tree vectype) { return exact_div(nunits, TYPE_VECTOR_SUBPARTS(vectype)).to_constant(); } /* * Return the number of copies needed for loop vectorization when a statement * operates on vectors of type VECTYPE. This is the vectorization factor * divided by the number of elements in VECTYPE and is always known at * compile time. */ static inline unsigned int vect_get_num_copies(loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors(LOOP_VINFO_VECT_FACTOR(loop_vinfo), vectype); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for the number * of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet * recorded any vector types. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, tree vectype) { /* * All unit counts have the form current_vector_size * X for some * rational X, so two unit sizes must have a common multiple. Everything * is a multiple of the initial value of 1. */ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS(vectype); *max_nunits = force_common_multiple(*max_nunits, nunits); } /* * Return the vectorization factor that should be used for costing purposes * while vectorizing the loop described by LOOP_VINFO. Pick a reasonable * estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost(loop_vec_info loop_vinfo) { return estimated_poly_value(LOOP_VINFO_VECT_FACTOR(loop_vinfo)); } /* * Estimate the number of elements in VEC_TYPE for costing purposes. Pick a * reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost(tree vec_type) { return estimated_poly_value(TYPE_VECTOR_SUBPARTS(vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf(loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR(loop_vinfo).is_constant(&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* * Return the size of the value accessed by unvectorized data reference * DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated * for the associated gimple statement, since that guarantees that DR_INFO * accesses either a scalar or a scalar equivalent. ("Scalar equivalent" * here includes things like V1SI, which can be vectorized in the same way as * a plain SI.) */ inline unsigned int vect_get_scalar_dr_size(dr_vec_info * dr_info) { return tree_to_uhwi(TYPE_SIZE_UNIT(TREE_TYPE(DR_REF(dr_info->dr)))); } /* Source location + hotness information. */ extern dump_user_location_t vect_location; /* * A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII * object, thus printing "=== MSG ===\n" to the dumpfile etc, and then * calling dump_end_scope (); once the object goes out of scope, thus * capturing the nesting of the scopes. * * These scopes affect dump messages within them: dump messages at the top level * implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested * scope implicitly default to MSG_PRIORITY_INTERNALS. 
*/ #define DUMP_VECT_SCOPE(MSG) \ AUTO_DUMP_SCOPE (MSG, vect_location) /* * A sentinel class for ensuring that the "vect_location" global gets reset * at the end of a scope. * * The "vect_location" global is used during dumping and contains a location_t, * which could contain references to a tree block via the ad-hoc data. This * data is used for tracking inlining information, but it's not a GC root; * it's simply assumed that such locations never get accessed if the blocks * are optimized away. * * Hence we need to ensure that such locations are purged at the end of any * operations using them (e.g. via this class). */ class auto_purge_vect_location { public: ~auto_purge_vect_location(); }; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* * Simple loop peeling and versioning utilities for vectorizer's purposes - * in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition(struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p(const struct loop *, const_edge); struct loop * slpeel_tree_duplicate_loop_to_edge_cfg(struct loop *, struct loop *, edge); struct loop * vect_loop_versioning(loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop * vect_do_peeling(loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels(loop_vec_info); extern dump_user_location_t find_loop_location(struct loop *); extern bool vect_can_advance_ivs_p(loop_vec_info); /* In tree-vect-stmts.c. */ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type(tree); extern tree get_vectype_for_scalar_type_and_size(tree, poly_uint64); extern tree get_mask_type_for_scalar_type(tree); extern tree get_same_sized_vectype(tree, tree); extern bool vect_get_loop_mask_type(loop_vec_info); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation(enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec < tree > *); extern bool supportable_narrowing_operation(enum tree_code, tree, tree, enum tree_code *, int *, vec < tree > *); extern unsigned record_stmt_cost(stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt(stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation(stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized(loop_vec_info); extern tree vect_get_store_rhs(stmt_vec_info); extern tree vect_get_vec_def_for_operand_1(stmt_vec_info, enum vect_def_type); extern tree vect_get_vec_def_for_operand(tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs(tree, tree, stmt_vec_info, vec < tree > *, vec < tree > *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy(vec_info *, vec < tree > *, vec < tree > *); extern tree vect_init_vector(stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy(vec_info *, tree); extern bool vect_transform_stmt(stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores(stmt_vec_info); extern opt_result vect_analyze_stmt(stmt_vec_info, bool *, slp_tree, 
slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost(stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost(stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift(enum tree_code, tree); extern tree vect_gen_perm_mask_any(tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked(tree, const vec_perm_indices &); extern void optimize_mask_stores(struct loop *); extern gcall *vect_gen_while(tree, tree, tree); extern tree vect_gen_while_not(gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt(stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt(stmt_vec_info); /* In tree-vect-data-refs.c. */ extern bool vect_can_force_dr_alignment_p(const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type(stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences(loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence(slp_instance); extern opt_result vect_enhance_data_refs_alignment(loop_vec_info); extern opt_result vect_analyze_data_refs_alignment(loop_vec_info); extern opt_result vect_verify_datarefs_alignment(loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment(slp_instance); extern opt_result vect_analyze_data_ref_accesses(vec_info *); extern opt_result vect_prune_runtime_alias_test_list(loop_vec_info); extern bool vect_gather_scatter_fn_p(bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter(stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference(loop_p, gimple *, vec < data_reference_p > *); extern opt_result vect_analyze_data_refs(vec_info *, poly_uint64 *); extern void vect_record_base_alignments(vec_info *); extern tree vect_create_data_ref_ptr(stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr(tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info(tree, tree); extern tree vect_create_destination_var(tree, tree); extern bool vect_grouped_store_supported(tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported(tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain(vec < tree >, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec < tree > *); extern tree vect_setup_realignment(stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load(stmt_vec_info, vec < tree >, int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors(stmt_vec_info, vec < tree >); extern tree vect_get_new_vect_var(tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name(tree, enum vect_var_kind, const char *= NULL); extern tree 
vect_create_addr_base_for_vector_ref(stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction(loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path(dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop(struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters(loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters(loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits(tree); extern tree vect_double_mask_nunits(tree); extern void vect_record_loop_mask(loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask(gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. */ extern struct loop *vect_transform_loop(loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form(struct loop *, vec_info_shared *); extern bool vectorizable_live_operation(stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction(stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p(vec_info *, tree_code); extern int vect_get_known_peeling_cost(loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader(loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance(slp_instance, bool); extern bool vect_transform_slp_perm_load(slp_tree, vec < tree >, gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations(vec_info *); extern void vect_schedule_slp(vec_info *); extern opt_result vect_analyze_slp(vec_info *, unsigned); extern bool vect_make_slp_decision(loop_vec_info); extern void vect_detect_hybrid_slp(loop_vec_info); extern void vect_get_slp_defs(vec < tree >, slp_tree, vec < vec < tree > >*); extern bool vect_slp_bb(basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp(slp_tree); extern bool is_simple_and_all_uses_invariant(stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p(unsigned int, machine_mode, unsigned int *= NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave(gimple_seq *, tree, vec < tree >, unsigned int, vec < tree > &); extern int vect_get_place_in_interleaving_chain(stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* * Pattern recognition functions. Additional pattern recognition functions * can (and will) be added in the future. */ void vect_pattern_recog(vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops(void); void vect_free_loop_info_assumptions(struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
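/* Editorial sketch (not part of tree-vectorizer.h): a standalone
   illustration of the alignment arithmetic used by
   vect_known_alignment_in_bytes above.  When only the misalignment in
   bytes relative to the target alignment is known, every access address
   is congruent to that misalignment modulo the (power-of-two) target
   alignment, so misalign & -misalign -- the lowest set bit of the
   misalignment -- is the largest power of two guaranteed to divide the
   address.  All names and values here are hypothetical.  */
#include <stdio.h>

static unsigned int
guaranteed_alignment (int misalign, unsigned int target_alignment)
{
  if (misalign == 0)
    return target_alignment;    /* fully aligned to the target alignment */
  return misalign & -misalign;  /* largest power of two dividing misalign */
}

int
main (void)
{
  /* Assume a 16-byte target alignment, as a vector of four floats might. */
  printf ("%u\n", guaranteed_alignment (0, 16));   /* 16 */
  printf ("%u\n", guaranteed_alignment (8, 16));   /* 8 */
  printf ("%u\n", guaranteed_alignment (12, 16));  /* 4 */
  printf ("%u\n", guaranteed_alignment (3, 16));   /* 1 */
  return 0;
}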
#ifndef GCC_TREE_VECTORIZER_H #define GCC_TREE_VECTORIZER_H typedef struct _stmt_vec_info *stmt_vec_info; #include "tree-data-ref.h" #include "tree-hash-traits.h" #include "target.h" /* Used for naming of new temporaries. */ enum vect_var_kind { vect_simple_var, vect_pointer_var, vect_scalar_var, vect_mask_var }; /* Defines type of operation. */ enum operation_type { unary_op = 1, binary_op, ternary_op }; /* Define type of available alignment support. */ enum dr_alignment_support { dr_unaligned_unsupported, dr_unaligned_supported, dr_explicit_realign, dr_explicit_realign_optimized, dr_aligned }; /* Define type of def-use cross-iteration cycle. */ enum vect_def_type { vect_uninitialized_def = 0, vect_constant_def = 1, vect_external_def, vect_internal_def, vect_induction_def, vect_reduction_def, vect_double_reduction_def, vect_nested_cycle, vect_unknown_def_type }; /* Define type of reduction. */ enum vect_reduction_type { TREE_CODE_REDUCTION, COND_REDUCTION, INTEGER_INDUC_COND_REDUCTION, CONST_COND_REDUCTION, /* * Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop to * implement: * * for (int i = 0; i < VF; ++i) res = cond[i] ? val[i] : res; */ EXTRACT_LAST_REDUCTION, /* * Use a folding reduction within the loop to implement: * * for (int i = 0; i < VF; ++i) res = res OP val[i]; * * (with no reassociation). */ FOLD_LEFT_REDUCTION }; #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \ || ((D) == vect_double_reduction_def) \ || ((D) == vect_nested_cycle)) /* * Structure to encapsulate information about a group of like instructions to * be presented to the target cost model. */ struct stmt_info_for_cost { int count; enum vect_cost_for_stmt kind; enum vect_cost_model_location where; stmt_vec_info stmt_info; int misalign; }; typedef vec < stmt_info_for_cost > stmt_vector_for_cost; /* * Maps base addresses to an innermost_loop_behavior that gives the maximum * known alignment for that base. */ typedef hash_map < tree_operand_hash, innermost_loop_behavior * >vec_base_alignments; /************************************************************************ SLP ************************************************************************/ typedef struct _slp_tree *slp_tree; /* * A computation tree of an SLP instance. Each node corresponds to a group * of stmts to be packed in a SIMD stmt. */ struct _slp_tree { /* Nodes that contain def-stmts of this node's statement operands. */ vec < slp_tree > children; /* A group of scalar stmts to be vectorized together. */ vec < stmt_vec_info > stmts; /* * Load permutation relative to the stores, NULL if there is no * permutation. */ vec < unsigned >load_permutation; /* Vectorized stmt/s. */ vec < stmt_vec_info > vec_stmts; /* * Number of vector stmts that are created to replace the group of scalar * stmts. It is calculated during the transformation phase as the number * of scalar elements in one scalar iteration (GROUP_SIZE) multiplied by * VF divided by vector size. */ unsigned int vec_stmts_size; /* Reference count in the SLP graph. */ unsigned int refcnt; /* Whether the scalar computations use two different operators. */ bool two_operators; /* The DEF type of this node. */ enum vect_def_type def_type; }; /* * SLP instance is a sequence of stmts in a loop that can be packed into SIMD * stmts. */ typedef struct _slp_instance { /* The root of SLP tree. */ slp_tree root; /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s. */ unsigned int group_size; /* The unrolling factor required to vectorize this SLP instance. 
*/ poly_uint64 unrolling_factor; /* The group of nodes that contain loads of this SLP instance. */ vec < slp_tree > loads; /* The SLP node containing the reduction PHIs. */ slp_tree reduc_phis; } *slp_instance; /* Access Functions. */ #define SLP_INSTANCE_TREE(S) (S)->root #define SLP_INSTANCE_GROUP_SIZE(S) (S)->group_size #define SLP_INSTANCE_UNROLLING_FACTOR(S) (S)->unrolling_factor #define SLP_INSTANCE_LOADS(S) (S)->loads #define SLP_TREE_CHILDREN(S) (S)->children #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts #define SLP_TREE_VEC_STMTS(S) (S)->vec_stmts #define SLP_TREE_NUMBER_OF_VEC_STMTS(S) (S)->vec_stmts_size #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation #define SLP_TREE_TWO_OPERATORS(S) (S)->two_operators #define SLP_TREE_DEF_TYPE(S) (S)->def_type /* * Describes two objects whose addresses must be unequal for the vectorized * loop to be valid. */ typedef std::pair < tree, tree > vec_object_pair; /* * Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE. * UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */ struct vec_lower_bound { vec_lower_bound() { } vec_lower_bound(tree e, bool u, poly_uint64 m) :expr(e), unsigned_p(u), min_value(m) { } tree expr; bool unsigned_p; poly_uint64 min_value; }; /* * Vectorizer state shared between different analyses like vector sizes of * the same CFG region. */ struct vec_info_shared { vec_info_shared(); ~vec_info_shared(); void save_datarefs(); void check_datarefs(); /* All data references. Freed by free_data_refs, so not an auto_vec. */ vec < data_reference_p > datarefs; vec < data_reference > datarefs_copy; /* The loop nest in which the data dependences are computed. */ auto_vec < loop_p > loop_nest; /* * All data dependences. Freed by free_dependence_relations, so not an * auto_vec. */ vec < ddr_p > ddrs; }; /* Vectorizer state common between loop and basic-block vectorization. */ struct vec_info { enum vec_kind { bb, loop }; vec_info(vec_kind, void *, vec_info_shared *); ~vec_info(); stmt_vec_info add_stmt(gimple *); stmt_vec_info lookup_stmt(gimple *); stmt_vec_info lookup_def(tree); stmt_vec_info lookup_single_use(tree); struct dr_vec_info *lookup_dr(data_reference *); void move_dr(stmt_vec_info, stmt_vec_info); void remove_stmt(stmt_vec_info); void replace_stmt(gimple_stmt_iterator *, stmt_vec_info, gimple *); /* The type of vectorization. */ vec_kind kind; /* Shared vectorizer state. */ vec_info_shared *shared; /* The mapping of GIMPLE UID to stmt_vec_info. */ vec < stmt_vec_info > stmt_vec_infos; /* All SLP instances. */ auto_vec < slp_instance > slp_instances; /* * Maps base addresses to an innermost_loop_behavior that gives the * maximum known alignment for that base. */ vec_base_alignments base_alignments; /* * All interleaving chains of stores, represented by the first stmt in * the chain. */ auto_vec < stmt_vec_info > grouped_stores; /* Cost data used by the target cost model. 
*/ void *target_cost_data; private: stmt_vec_info new_stmt_vec_info(gimple * stmt); void set_vinfo_for_stmt(gimple *, stmt_vec_info); void free_stmt_vec_infos(); void free_stmt_vec_info(stmt_vec_info); }; struct _loop_vec_info; struct _bb_vec_info; template <> template <> inline bool is_a_helper < _loop_vec_info * >::test(vec_info * i) { return i->kind == vec_info::loop; } template <> template <> inline bool is_a_helper < _bb_vec_info * >::test(vec_info * i) { return i->kind == vec_info::bb; } /* * In general, we can divide the vector statements in a vectorized loop into * related groups ("rgroups") and say that for each rgroup there is some nS * such that the rgroup operates on nS values from one scalar iteration * followed by nS values from the next. That is, if VF is the vectorization * factor of the loop, the rgroup operates on a sequence: * * (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS) * * where (i,j) represents a scalar value with index j in a scalar iteration with * index i. * * [ We use the term "rgroup" to emphasise that this grouping isn't necessarily * the same as the grouping of statements used elsewhere. For example, if we * implement a group of scalar loads using gather loads, we'll use a separate * gather load for each scalar load, and thus each gather load will belong to * its own rgroup. ] * * In general this sequence will occupy nV vectors concatenated together. If * these vectors have nL lanes each, the total number of scalar values N is * given by: * * N = nS * VF = nV * nL * * None of nS, VF, nV and nL are required to be a power of 2. nS and nV are * compile-time constants but VF and nL can be variable (if the target * supports variable-length vectors). * * In classical vectorization, each iteration of the vector loop would handle * exactly VF iterations of the original scalar loop. However, in a * fully-masked loop, a particular iteration of the vector loop might handle * fewer than VF iterations of the scalar loop. The vector lanes that * correspond to iterations of the scalar loop are said to be "active" and * the other lanes are said to be "inactive". * * In a fully-masked loop, many rgroups need to be masked to ensure that they * have no effect for the inactive lanes. Each such rgroup needs a sequence * of booleans in the same order as above, but with each (i,j) replaced by a * boolean that indicates whether iteration i is active. This sequence * occupies nV vector masks that again have nL lanes each. Thus the mask * sequence as a whole consists of VF independent booleans that are each * repeated nS times. * * We make the simplifying assumption that if a sequence of nV masks is suitable * for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by VIEW_CONVERTing * it. This holds for all current targets that support fully-masked loops. * For example, suppose the scalar loop is: * * float *f; double *d; for (int i = 0; i < n; ++i) { f[i * 2 + 0] += 1.0f; f[i * * 2 + 1] += 2.0f; d[i] += 3.0; } * * and suppose that vectors have 256 bits. The vectorized f accesses will * belong to one rgroup and the vectorized d access to another: * * f rgroup: nS = 2, nV = 1, nL = 8 d rgroup: nS = 1, nV = 1, nL = 4 VF = 4 * * [ In this simple example the rgroups do correspond to the normal SLP grouping * scheme. ] * * If only the first three lanes are active, the masks we need are: * * f rgroup: 1 1 | 1 1 | 1 1 | 0 0 d rgroup: 1 | 1 | 1 | 0 * * Here we can use a mask calculated for f's rgroup for d's, but not vice versa.
* * Thus for each value of nV, it is enough to provide nV masks, with the mask * being calculated based on the highest nL (or, equivalently, based on the * highest nS) required by any rgroup with that nV. We therefore represent * the entire collection of masks as a two-level table, with the first level * being indexed by nV - 1 (since nV == 0 doesn't exist) and the second being * indexed by the mask index 0 <= i < nV. */ /* * The masks needed by rgroups with nV vectors, according to the description * above. */ struct rgroup_masks { /* The largest nS for all rgroups that use these masks. */ unsigned int max_nscalars_per_iter; /* The type of mask to use, based on the highest nS recorded above. */ tree mask_type; /* A vector of nV masks, in iteration order. */ vec < tree > masks; }; typedef auto_vec < rgroup_masks > vec_loop_masks; /*-----------------------------------------------------------------*/ /* Info on vectorized loops. */ /*-----------------------------------------------------------------*/ typedef struct _loop_vec_info:public vec_info { _loop_vec_info(struct loop *, vec_info_shared *); ~_loop_vec_info(); /* The loop to which this info struct refers to. */ struct loop *loop; /* The loop basic blocks. */ basic_block *bbs; /* Number of latch executions. */ tree num_itersm1; /* Number of iterations. */ tree num_iters; /* Number of iterations of the original loop. */ tree num_iters_unchanged; /* Condition under which this loop is analyzed and versioned. */ tree num_iters_assumptions; /* * Threshold of number of iterations below which vectorzation will not be * performed. It is calculated from MIN_PROFITABLE_ITERS and * PARAM_MIN_VECT_LOOP_BOUND. */ unsigned int th; /* * When applying loop versioning, the vector form should only be used if * the number of scalar iterations is >= this value, on top of all the * other requirements. Ignored when loop versioning is not being used. */ poly_uint64 versioning_threshold; /* Unrolling factor */ poly_uint64 vectorization_factor; /* * Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR if * there is no particular limit. */ unsigned HOST_WIDE_INT max_vectorization_factor; /* * The masks that a fully-masked loop should use to avoid operating on * inactive scalars. */ vec_loop_masks masks; /* * If we are using a loop mask to align memory addresses, this variable * contains the number of vector elements that we should skip in the * first iteration of the vector loop (i.e. the number of leading * elements that should be false in the first mask). */ tree mask_skip_niters; /* * Type of the variables to use in the WHILE_ULT call for fully-masked * loops. */ tree mask_compare_type; /* * For #pragma omp simd if (x) loops the x expression. If constant 0, * the loop should not be vectorized, if constant non-zero, simd_if_cond * shouldn't be set and loop vectorized normally, if SSA_NAME, the loop * should be versioned on that condition, using scalar loop if the * condition is false and vectorized loop otherwise. */ tree simd_if_cond; /* Unknown DRs according to which loop was peeled. */ struct dr_vec_info *unaligned_dr; /* * peeling_for_alignment indicates whether peeling for alignment will * take place, and what the peeling factor should be: * peeling_for_alignment = X means: If X=0: Peeling for alignment will * not be applied. If X>0: Peel first X iterations. If X=-1: Generate a * runtime test to calculate the number of iterations to be peeled, using * the dataref recorded in the field unaligned_dr. 
*/ int peeling_for_alignment; /* The mask used to check the alignment of pointers or arrays. */ int ptr_mask; /* * Data Dependence Relations defining address ranges that are candidates * for a run-time aliasing check. */ auto_vec < ddr_p > may_alias_ddrs; /* * Data Dependence Relations defining address ranges together with * segment lengths from which the run-time aliasing check is built. */ auto_vec < dr_with_seg_len_pair_t > comp_alias_ddrs; /* Check that the addresses of each pair of objects is unequal. */ auto_vec < vec_object_pair > check_unequal_addrs; /* * List of values that are required to be nonzero. This is used to check * whether things like "x[i * n] += 1;" are safe and eventually gets * added to the checks for lower bounds below. */ auto_vec < tree > check_nonzero; /* List of values that need to be checked for a minimum value. */ auto_vec < vec_lower_bound > lower_bounds; /* * Statements in the loop that have data references that are candidates * for a runtime (loop versioning) misalignment check. */ auto_vec < stmt_vec_info > may_misalign_stmts; /* Reduction cycles detected in the loop. Used in loop-aware SLP. */ auto_vec < stmt_vec_info > reductions; /* * All reduction chains in the loop, represented by the first stmt in the * chain. */ auto_vec < stmt_vec_info > reduction_chains; /* Cost vector for a single scalar iteration. */ auto_vec < stmt_info_for_cost > scalar_cost_vec; /* Map of IV base/step expressions to inserted name in the preheader. */ hash_map < tree_operand_hash, tree > *ivexpr_map; /* * The unrolling factor needed to SLP the loop. In case of that pure SLP * is applied to the loop, i.e., no unrolling is needed, this is 1. */ poly_uint64 slp_unrolling_factor; /* Cost of a single scalar iteration. */ int single_scalar_iteration_cost; /* Is the loop vectorizable? */ bool vectorizable; /* Records whether we still have the option of using a fully-masked loop. */ bool can_fully_mask_p; /* True if have decided to use a fully-masked loop. */ bool fully_masked_p; /* * When we have grouped data accesses with gaps, we may introduce invalid * memory accesses. We peel the last iteration of the loop to prevent * this. */ bool peeling_for_gaps; /* * When the number of iterations is not a multiple of the vector size we * need to peel off iterations at the end to form an epilogue loop. */ bool peeling_for_niter; /* * Reductions are canonicalized so that the last operand is the reduction * operand. If this places a constant into RHS1, this decanonicalizes * GIMPLE for other phases, so we must track when this has occurred and * fix it up. */ bool operands_swapped; /* * True if there are no loop carried data dependencies in the loop. If * loop->safelen <= 1, then this is always true, either the loop didn't * have any loop carried data dependencies, or the loop is being * vectorized guarded with some runtime alias checks, or couldn't be * vectorized at all, but then this field shouldn't be used. For * loop->safelen >= 2, the user has asserted that there are no backward * dependencies, but there still could be loop carried forward * dependencies in such loops. This flag will be false if normal * vectorizer data dependency analysis would fail or require versioning * for alias, but because of loop->safelen >= 2 it has been vectorized * even without versioning for alias. E.g. 
in: #pragma omp simd for (int * i = 0; i < m; i++) a[i] = a[i + k] * c; (or #pragma simd or #pragma * ivdep) we can vectorize this and it will DTRT even for k > 0 && k < m, * but without safelen we would not vectorize this, so this field would * be false. */ bool no_data_dependencies; /* Mark loops having masked stores. */ bool has_mask_store; /* * If if-conversion versioned this loop before conversion, this is the * loop version without if-conversion. */ struct loop *scalar_loop; /* * For loops being epilogues of already vectorized loops this points to * the original vectorized loop. Otherwise NULL. */ _loop_vec_info *orig_loop_info; } *loop_vec_info; /* Access Functions. */ #define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1 #define LOOP_VINFO_NITERS(L) (L)->num_iters /* * Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue * peeling retain total unchanged scalar loop iterations for cost model. */ #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_CAN_FULLY_MASK_P(L) (L)->can_fully_mask_p #define LOOP_VINFO_FULLY_MASKED_P(L) (L)->fully_masked_p #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor #define LOOP_VINFO_MASKS(L) (L)->masks #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters #define LOOP_VINFO_MASK_COMPARE_TYPE(L) (L)->mask_compare_type #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions #define LOOP_VINFO_REDUCTION_CHAINS(L) (L)->reduction_chains #define LOOP_VINFO_TARGET_COST_DATA(L) (L)->target_cost_data #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps #define LOOP_VINFO_OPERANDS_SWAPPED(L) (L)->operands_swapped #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec #define LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST(L) (L)->single_scalar_iteration_cost #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \ ((L)->may_misalign_stmts.length () > 0) #define 
LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \ ((L)->comp_alias_ddrs.length () > 0 \ || (L)->check_unequal_addrs.length () > 0 \ || (L)->lower_bounds.length () > 0) #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \ (LOOP_VINFO_NITERS_ASSUMPTIONS (L)) #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \ (LOOP_VINFO_SIMD_IF_COND (L)) #define LOOP_REQUIRES_VERSIONING(L) \ (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \ || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \ || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L)) #define LOOP_VINFO_NITERS_KNOWN_P(L) \ (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0) #define LOOP_VINFO_EPILOGUE_P(L) \ (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL) #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \ (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L))) /* * Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL * value signifies success, and a NULL value signifies failure, supporting * propagating an opt_problem * describing the failure back up the call * stack. */ typedef opt_pointer_wrapper < loop_vec_info > opt_loop_vec_info; static inline loop_vec_info loop_vec_info_for_loop(struct loop *loop) { return (loop_vec_info) loop->aux; } typedef struct _bb_vec_info:public vec_info { _bb_vec_info(gimple_stmt_iterator, gimple_stmt_iterator, vec_info_shared *); ~_bb_vec_info(); basic_block bb; gimple_stmt_iterator region_begin; gimple_stmt_iterator region_end; } *bb_vec_info; #define BB_VINFO_BB(B) (B)->bb #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs #define BB_VINFO_DDRS(B) (B)->shared->ddrs #define BB_VINFO_TARGET_COST_DATA(B) (B)->target_cost_data static inline bb_vec_info vec_info_for_bb(basic_block bb) { return (bb_vec_info) bb->aux; } /*-----------------------------------------------------------------*/ /* Info on vectorized defs. */ /*-----------------------------------------------------------------*/ enum stmt_vec_info_type { undef_vec_info_type = 0, load_vec_info_type, store_vec_info_type, shift_vec_info_type, op_vec_info_type, call_vec_info_type, call_simd_clone_vec_info_type, assignment_vec_info_type, condition_vec_info_type, comparison_vec_info_type, reduc_vec_info_type, induc_vec_info_type, type_promotion_vec_info_type, type_demotion_vec_info_type, type_conversion_vec_info_type, loop_exit_ctrl_vec_info_type }; /* * Indicates whether/how a variable is used in the scope of loop/basic block. */ enum vect_relevant { vect_unused_in_scope = 0, /* The def is only used outside the loop. */ vect_used_only_live, /* * The def is in the inner loop, and the use is in the outer loop, and * the use is a reduction stmt. */ vect_used_in_outer_by_reduction, /* * The def is in the inner loop, and the use is in the outer loop (and is * not part of reduction). */ vect_used_in_outer, /* * defs that feed computations that end up (only) in a reduction. These * defs may be used by non-reduction stmts, but eventually, any * computations/values that are affected by these defs are used to * compute a reduction (i.e. don't get stored to memory, for example). We * use this to identify computations that we can change the order in * which they are computed. 
*/ vect_used_by_reduction, vect_used_in_scope }; /* * The type of vectorization that can be applied to the stmt: regular * loop-based vectorization; pure SLP - the stmt is a part of SLP instances * and does not have uses outside SLP instances; or hybrid SLP and loop-based * - the stmt is a part of SLP instance and also must be loop-based * vectorized, since it has uses outside SLP sequences. * * In the loop context the meanings of pure and hybrid SLP are slightly * different. By saying that pure SLP is applied to the loop, we mean that we * exploit only intra-iteration parallelism in the loop; i.e., the loop can * be vectorized without doing any conceptual unrolling, cause we don't pack * together stmts from different iterations, only within a single iteration. * Loop hybrid SLP means that we exploit both intra-iteration and * inter-iteration parallelism (e.g., number of elements in the vector is 4 * and the slp-group-size is 2, in which case we don't have enough * parallelism within an iteration, so we obtain the rest of the parallelism * from subsequent iterations by unrolling the loop by 2). */ enum slp_vect_type { loop_vect = 0, pure_slp, hybrid }; /* * Says whether a statement is a load, a store of a vectorized statement * result, or a store of an invariant value. */ enum vec_load_store_type { VLS_LOAD, VLS_STORE, VLS_STORE_INVARIANT }; /* * Describes how we're going to vectorize an individual load or store, or a * group of loads or stores. */ enum vect_memory_access_type { /* An access to an invariant address. This is used only for loads. */ VMAT_INVARIANT, /* A simple contiguous access. */ VMAT_CONTIGUOUS, /* * A contiguous access that goes down in memory rather than up, with no * additional permutation. This is used only for stores of invariants. */ VMAT_CONTIGUOUS_DOWN, /* * A simple contiguous access in which the elements need to be permuted * after loading or before storing. Only used for loop vectorization; * SLP uses separate permutes. */ VMAT_CONTIGUOUS_PERMUTE, /* * A simple contiguous access in which the elements need to be reversed * after loading or before storing. */ VMAT_CONTIGUOUS_REVERSE, /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */ VMAT_LOAD_STORE_LANES, /* * An access in which each scalar element is loaded or stored * individually. */ VMAT_ELEMENTWISE, /* * A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped SLP * accesses. Each unrolled iteration uses a contiguous load or store for * the whole group, but the groups from separate iterations are combined * in the same way as for VMAT_ELEMENTWISE. */ VMAT_STRIDED_SLP, /* The access uses gather loads or scatter stores. */ VMAT_GATHER_SCATTER }; struct dr_vec_info { /* The data reference itself. */ data_reference *dr; /* The statement that contains the data reference. */ stmt_vec_info stmt; /* The misalignment in bytes of the reference, or -1 if not known. */ int misalignment; /* * The byte alignment that we'd ideally like the reference to have, and * the value that misalignment is measured against. */ poly_uint64 target_alignment; /* If true the alignment of base_decl needs to be increased. */ bool base_misaligned; tree base_decl; }; typedef struct data_reference *dr_p; struct _stmt_vec_info { enum stmt_vec_info_type type; /* * Indicates whether this stmts is part of a computation whose result is * used outside the loop. 
*/ bool live; /* Stmt is part of some pattern (computation idiom) */ bool in_pattern_p; /* * True if the statement was created during pattern recognition as part * of the replacement for RELATED_STMT. This implies that the statement * isn't part of any basic block, although for convenience its gimple_bb * is the same as for RELATED_STMT. */ bool pattern_stmt_p; /* * Is this statement vectorizable or should it be skipped in (partial) * vectorization. */ bool vectorizable; /* The stmt to which this info struct refers to. */ gimple *stmt; /* The vec_info with respect to which STMT is vectorized. */ vec_info *vinfo; /* The vector type to be used for the LHS of this statement. */ tree vectype; /* The vectorized version of the stmt. */ stmt_vec_info vectorized_stmt; /* * The following is relevant only for stmts that contain a non-scalar * data-ref (array/pointer/struct access). A GIMPLE stmt is expected to * have at most one such data-ref. */ dr_vec_info dr_aux; /* * Information about the data-ref relative to this loop nest (the loop * that is being considered for vectorization). */ innermost_loop_behavior dr_wrt_vec_loop; /* * For loop PHI nodes, the base and evolution part of it. This makes * sure this information is still available in * vect_update_ivs_after_vectorizer where we may not be able to * re-analyze the PHI nodes evolution as peeling for the prologue loop * can make it unanalyzable. The evolution part is still correct after * peeling, but the base may have changed from the version here. */ tree loop_phi_evolution_base_unchanged; tree loop_phi_evolution_part; /* * Used for various bookkeeping purposes, generally holding a pointer to * some other stmt S that is in some way "related" to this stmt. Current * use of this field is: If this stmt is part of a pattern (i.e. the * field 'in_pattern_p' is true): S is the "pattern stmt" that represents * (and replaces) the sequence of stmts that constitutes the pattern. * Similarly, the related_stmt of the "pattern stmt" points back to this * stmt (which is the last stmt in the original sequence of stmts that * constitutes the pattern). */ stmt_vec_info related_stmt; /* * Used to keep a sequence of def stmts of a pattern stmt if such exists. * The sequence is attached to the original statement rather than the * pattern statement. */ gimple_seq pattern_def_seq; /* * List of datarefs that are known to have the same alignment as the * dataref of this stmt. */ vec < dr_p > same_align_refs; /* * Selected SIMD clone's function info. First vector element is SIMD * clone's function decl, followed by a pair of trees (base + step) for * linear arguments (pair of NULLs for other arguments). */ vec < tree > simd_clone_info; /* Classify the def of this stmt. */ enum vect_def_type def_type; /* Whether the stmt is SLPed, loop-based vectorized, or both. */ enum slp_vect_type slp_type; /* Interleaving and reduction chains info. */ /* First element in the group. */ stmt_vec_info first_element; /* Pointer to the next element in the group. */ stmt_vec_info next_element; /* The size of the group. */ unsigned int size; /* * For stores, number of stores from this group seen. We vectorize the * last one. */ unsigned int store_count; /* * For loads only, the gap from the previous load. For consecutive loads, * GAP is 1. */ unsigned int gap; /* * The minimum negative dependence distance this stmt participates in or * zero if none. */ unsigned int min_neg_dist; /* * Not all stmts in the loop need to be vectorized. 
e.g, the increment of * the loop induction variable and computation of array indexes. relevant * indicates whether the stmt needs to be vectorized. */ enum vect_relevant relevant; /* For loads if this is a gather, for stores if this is a scatter. */ bool gather_scatter_p; /* True if this is an access with loop-invariant stride. */ bool strided_p; /* For both loads and stores. */ bool simd_lane_access_p; /* * Classifies how the load or store is going to be implemented for loop * vectorization. */ vect_memory_access_type memory_access_type; /* For reduction loops, this is the type of reduction. */ enum vect_reduction_type v_reduc_type; /* For CONST_COND_REDUCTION, record the reduc code. */ enum tree_code const_cond_reduc_code; /* * On a reduction PHI the reduction type as detected by * vect_force_simple_reduction. */ enum vect_reduction_type reduc_type; /* * On a reduction PHI the def returned by vect_force_simple_reduction. On * the def returned by vect_force_simple_reduction the corresponding PHI. */ stmt_vec_info reduc_def; /* The number of scalar stmt references from active SLP instances. */ unsigned int num_slp_uses; /* * If nonzero, the lhs of the statement could be truncated to this many * bits without affecting any users of the result. */ unsigned int min_output_precision; /* * If nonzero, all non-boolean input operands have the same precision, * and they could each be truncated to this many bits without changing * the result. */ unsigned int min_input_precision; /* * If OPERATION_BITS is nonzero, the statement could be performed on an * integer with the sign and number of bits given by OPERATION_SIGN and * OPERATION_BITS without changing the result. */ unsigned int operation_precision; signop operation_sign; }; /* Information about a gather/scatter call. */ struct gather_scatter_info { /* * The internal function to use for the gather/scatter operation, or * IFN_LAST if a built-in function should be used instead. */ internal_fn ifn; /* * The FUNCTION_DECL for the built-in gather/scatter function, or null if * an internal function should be used instead. */ tree decl; /* The loop-invariant base value. */ tree base; /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */ tree offset; /* * Each offset element should be multiplied by this amount before being * added to the base. */ int scale; /* The definition type for the vectorized offset. */ enum vect_def_type offset_dt; /* The type of the vectorized offset. */ tree offset_vectype; /* The type of the scalar elements after loading or before storing. */ tree element_type; /* The type of the scalar elements being loaded or stored. */ tree memory_type; }; /* Access Functions. 
*/ #define STMT_VINFO_TYPE(S) (S)->type #define STMT_VINFO_STMT(S) (S)->stmt inline loop_vec_info STMT_VINFO_LOOP_VINFO(stmt_vec_info stmt_vinfo) { if (loop_vec_info loop_vinfo = dyn_cast < loop_vec_info > (stmt_vinfo->vinfo)) return loop_vinfo; return NULL; } inline bb_vec_info STMT_VINFO_BB_VINFO(stmt_vec_info stmt_vinfo) { if (bb_vec_info bb_vinfo = dyn_cast < bb_vec_info > (stmt_vinfo->vinfo)) return bb_vinfo; return NULL; } #define STMT_VINFO_RELEVANT(S) (S)->relevant #define STMT_VINFO_LIVE_P(S) (S)->live #define STMT_VINFO_VECTYPE(S) (S)->vectype #define STMT_VINFO_VEC_STMT(S) (S)->vectorized_stmt #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0) #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p #define STMT_VINFO_STRIDED_P(S) (S)->strided_p #define STMT_VINFO_MEMORY_ACCESS_TYPE(S) (S)->memory_access_type #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p #define STMT_VINFO_VEC_REDUCTION_TYPE(S) (S)->v_reduc_type #define STMT_VINFO_VEC_CONST_COND_REDUC_CODE(S) (S)->const_cond_reduc_code #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \ (S)->dr_wrt_vec_loop.base_misalignment #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.offset_alignment #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \ (S)->dr_wrt_vec_loop.step_alignment #define STMT_VINFO_DR_INFO(S) \ (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux) #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq #define STMT_VINFO_SAME_ALIGN_REFS(S) (S)->same_align_refs #define STMT_VINFO_SIMD_CLONE_INFO(S) (S)->simd_clone_info #define STMT_VINFO_DEF_TYPE(S) (S)->def_type #define STMT_VINFO_GROUPED_ACCESS(S) \ ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S)) #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist #define STMT_VINFO_NUM_SLP_USES(S) (S)->num_slp_uses #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def #define DR_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element) #define DR_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element) #define DR_GROUP_SIZE(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->size) #define DR_GROUP_STORE_COUNT(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count) #define DR_GROUP_GAP(S) \ (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap) #define REDUC_GROUP_FIRST_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element) #define REDUC_GROUP_NEXT_ELEMENT(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element) #define REDUC_GROUP_SIZE(S) \ (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size) #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope) #define HYBRID_SLP_STMT(S) ((S)->slp_type == hybrid) #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp) #define STMT_SLP_TYPE(S) (S)->slp_type #define VECT_MAX_COST 1000 /* * The 
maximum number of intermediate steps required in multi-step type * conversion. */ #define MAX_INTERM_CVT_STEPS 3 #define MAX_VECTORIZATION_FACTOR INT_MAX /* * Nonzero if TYPE represents a (scalar) boolean type or type in the * middle-end compatible with it (unsigned precision 1 integral types). Used * to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P. */ #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \ (TREE_CODE (TYPE) == BOOLEAN_TYPE \ || ((TREE_CODE (TYPE) == INTEGER_TYPE \ || TREE_CODE (TYPE) == ENUMERAL_TYPE) \ && TYPE_PRECISION (TYPE) == 1 \ && TYPE_UNSIGNED (TYPE))) static inline bool nested_in_vect_loop_p(struct loop *loop, stmt_vec_info stmt_info) { return (loop->inner && (loop->inner == (gimple_bb(stmt_info->stmt))->loop_father)); } /* * Return TRUE if a statement represented by STMT_INFO is a part of a * pattern. */ static inline bool is_pattern_stmt_p(stmt_vec_info stmt_info) { return stmt_info->pattern_stmt_p; } /* * If STMT_INFO is a pattern statement, return the statement that it * replaces, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_orig_stmt(stmt_vec_info stmt_info) { if (is_pattern_stmt_p(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return the later statement between STMT1_INFO and STMT2_INFO. */ static inline stmt_vec_info get_later_stmt(stmt_vec_info stmt1_info, stmt_vec_info stmt2_info) { if (gimple_uid(vect_orig_stmt(stmt1_info)->stmt) > gimple_uid(vect_orig_stmt(stmt2_info)->stmt)) return stmt1_info; else return stmt2_info; } /* * If STMT_INFO has been replaced by a pattern statement, return the * replacement statement, otherwise return STMT_INFO itself. */ inline stmt_vec_info vect_stmt_to_vectorize(stmt_vec_info stmt_info) { if (STMT_VINFO_IN_PATTERN_P(stmt_info)) return STMT_VINFO_RELATED_STMT(stmt_info); return stmt_info; } /* Return true if BB is a loop header. */ static inline bool is_loop_header_bb_p(basic_block bb) { if (bb == (bb->loop_father)->header) return true; gcc_checking_assert(EDGE_COUNT(bb->preds) == 1); return false; } /* Return pow2 (X). */ static inline int vect_pow2(int x) { int i, res = 1; for (i = 0; i < x; i++) res *= 2; return res; } /* Alias targetm.vectorize.builtin_vectorization_cost. */ static inline int builtin_vectorization_cost(enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign) { return targetm.vectorize.builtin_vectorization_cost(type_of_cost, vectype, misalign); } /* Get cost by calling cost target builtin. */ static inline int vect_get_stmt_cost(enum vect_cost_for_stmt type_of_cost) { return builtin_vectorization_cost(type_of_cost, NULL, 0); } /* Alias targetm.vectorize.init_cost. */ static inline void * init_cost(struct loop *loop_info) { return targetm.vectorize.init_cost(loop_info); } extern void dump_stmt_cost(FILE *, void *, int, enum vect_cost_for_stmt, stmt_vec_info, int, unsigned, enum vect_cost_model_location); /* Alias targetm.vectorize.add_stmt_cost. */ static inline unsigned add_stmt_cost(void *data, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where) { unsigned cost = targetm.vectorize.add_stmt_cost(data, count, kind, stmt_info, misalign, where); if (dump_file && (dump_flags & TDF_DETAILS)) dump_stmt_cost(dump_file, data, count, kind, stmt_info, misalign, cost, where); return cost; } /* Alias targetm.vectorize.finish_cost. 
*/ static inline void finish_cost(void *data, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost) { targetm.vectorize.finish_cost(data, prologue_cost, body_cost, epilogue_cost); } /* Alias targetm.vectorize.destroy_cost_data. */ static inline void destroy_cost_data(void *data) { targetm.vectorize.destroy_cost_data(data); } inline void add_stmt_costs(void *data, stmt_vector_for_cost * cost_vec) { stmt_info_for_cost *cost; unsigned i; FOR_EACH_VEC_ELT(*cost_vec, i, cost) add_stmt_cost(data, cost->count, cost->kind, cost->stmt_info, cost->misalign, cost->where); } /*-----------------------------------------------------------------*/ /* Info on data references alignment. */ /*-----------------------------------------------------------------*/ #define DR_MISALIGNMENT_UNKNOWN (-1) #define DR_MISALIGNMENT_UNINITIALIZED (-2) inline void set_dr_misalignment(dr_vec_info * dr_info, int val) { dr_info->misalignment = val; } inline int dr_misalignment(dr_vec_info * dr_info) { int misalign = dr_info->misalignment; gcc_assert(misalign != DR_MISALIGNMENT_UNINITIALIZED); return misalign; } /* * Reflects actual alignment of first access in the vectorized loop, taking * into account peeling/versioning if applied. */ #define DR_MISALIGNMENT(DR) dr_misalignment (DR) #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL) /* Only defined once DR_MISALIGNMENT is defined. */ #define DR_TARGET_ALIGNMENT(DR) ((DR)->target_alignment) /* * Return true if data access DR_INFO is aligned to its target alignment * (which may be less than a full vector). */ static inline bool aligned_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) == 0); } /* * Return TRUE if the alignment of the data access is known, and FALSE * otherwise. */ static inline bool known_alignment_for_access_p(dr_vec_info * dr_info) { return (DR_MISALIGNMENT(dr_info) != DR_MISALIGNMENT_UNKNOWN); } /* * Return the minimum alignment in bytes that the vectorized version of * DR_INFO is guaranteed to have. */ static inline unsigned int vect_known_alignment_in_bytes(dr_vec_info * dr_info) { if (DR_MISALIGNMENT(dr_info) == DR_MISALIGNMENT_UNKNOWN) return TYPE_ALIGN_UNIT(TREE_TYPE(DR_REF(dr_info->dr))); if (DR_MISALIGNMENT(dr_info) == 0) return known_alignment(DR_TARGET_ALIGNMENT(dr_info)); return DR_MISALIGNMENT(dr_info) & -DR_MISALIGNMENT(dr_info); } /* * Return the behavior of DR_INFO with respect to the vectorization context * (which for outer loop vectorization might not be the behavior recorded in * DR_INFO itself). */ static inline innermost_loop_behavior * vect_dr_behavior(dr_vec_info * dr_info) { stmt_vec_info stmt_info = dr_info->stmt; loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO(stmt_info); if (loop_vinfo == NULL || !nested_in_vect_loop_p(LOOP_VINFO_LOOP(loop_vinfo), stmt_info)) return &DR_INNERMOST(dr_info->dr); else return &STMT_VINFO_DR_WRT_VEC_LOOP(stmt_info); } /* Return true if the vect cost model is unlimited. */ static inline bool unlimited_cost_model(loop_p loop) { if (loop != NULL && loop->force_vectorize && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT) return flag_simd_cost_model == VECT_COST_MODEL_UNLIMITED; return (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED); } /* * Return true if the loop described by LOOP_VINFO is fully-masked and if the * first iteration should use a partial mask in order to achieve alignment. 
*/ static inline bool vect_use_loop_mask_for_alignment_p(loop_vec_info loop_vinfo) { return (LOOP_VINFO_FULLY_MASKED_P(loop_vinfo) && LOOP_VINFO_PEELING_FOR_ALIGNMENT(loop_vinfo)); } /* * Return the number of vectors of type VECTYPE that are needed to get NUNITS * elements. NUNITS should be based on the vectorization factor, so it is * always a known multiple of the number of elements in VECTYPE. */ static inline unsigned int vect_get_num_vectors(poly_uint64 nunits, tree vectype) { return exact_div(nunits, TYPE_VECTOR_SUBPARTS(vectype)).to_constant(); } /* * Return the number of copies needed for loop vectorization when a statement * operates on vectors of type VECTYPE. This is the vectorization factor * divided by the number of elements in VECTYPE and is always known at * compile time. */ static inline unsigned int vect_get_num_copies(loop_vec_info loop_vinfo, tree vectype) { return vect_get_num_vectors(LOOP_VINFO_VECT_FACTOR(loop_vinfo), vectype); } /* * Update maximum unit count *MAX_NUNITS so that it accounts for the number * of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet * recorded any vector types. */ static inline void vect_update_max_nunits(poly_uint64 * max_nunits, tree vectype) { /* * All unit counts have the form current_vector_size * X for some * rational X, so two unit sizes must have a common multiple. Everything * is a multiple of the initial value of 1. */ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS(vectype); *max_nunits = force_common_multiple(*max_nunits, nunits); } /* * Return the vectorization factor that should be used for costing purposes * while vectorizing the loop described by LOOP_VINFO. Pick a reasonable * estimate if the vectorization factor isn't known at compile time. */ static inline unsigned int vect_vf_for_cost(loop_vec_info loop_vinfo) { return estimated_poly_value(LOOP_VINFO_VECT_FACTOR(loop_vinfo)); } /* * Estimate the number of elements in VEC_TYPE for costing purposes. Pick a * reasonable estimate if the exact number isn't known at compile time. */ static inline unsigned int vect_nunits_for_cost(tree vec_type) { return estimated_poly_value(TYPE_VECTOR_SUBPARTS(vec_type)); } /* Return the maximum possible vectorization factor for LOOP_VINFO. */ static inline unsigned HOST_WIDE_INT vect_max_vf(loop_vec_info loop_vinfo) { unsigned HOST_WIDE_INT vf; if (LOOP_VINFO_VECT_FACTOR(loop_vinfo).is_constant(&vf)) return vf; return MAX_VECTORIZATION_FACTOR; } /* * Return the size of the value accessed by unvectorized data reference * DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated * for the associated gimple statement, since that guarantees that DR_INFO * accesses either a scalar or a scalar equivalent. ("Scalar equivalent" * here includes things like V1SI, which can be vectorized in the same way as * a plain SI.) */ inline unsigned int vect_get_scalar_dr_size(dr_vec_info * dr_info) { return tree_to_uhwi(TYPE_SIZE_UNIT(TREE_TYPE(DR_REF(dr_info->dr)))); } /* Source location + hotness information. */ extern dump_user_location_t vect_location; /* * A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII * object, thus printing "=== MSG ===\n" to the dumpfile etc, and then * calling dump_end_scope (); once the object goes out of scope, thus * capturing the nesting of the scopes. * * These scopes affect dump messages within them: dump messages at the top level * implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested * scope implicitly default to MSG_PRIORITY_INTERNALS. 
*/ #define DUMP_VECT_SCOPE(MSG) \ AUTO_DUMP_SCOPE (MSG, vect_location) /* * A sentinel class for ensuring that the "vect_location" global gets reset * at the end of a scope. * * The "vect_location" global is used during dumping and contains a location_t, * which could contain references to a tree block via the ad-hoc data. This * data is used for tracking inlining information, but it's not a GC root; * it's simply assumed that such locations never get accessed if the blocks * are optimized away. * * Hence we need to ensure that such locations are purged at the end of any * operations using them (e.g. via this class). */ class auto_purge_vect_location { public: ~auto_purge_vect_location(); }; /*-----------------------------------------------------------------*/ /* Function prototypes. */ /*-----------------------------------------------------------------*/ /* * Simple loop peeling and versioning utilities for vectorizer's purposes - * in tree-vect-loop-manip.c. */ extern void vect_set_loop_condition(struct loop *, loop_vec_info, tree, tree, tree, bool); extern bool slpeel_can_duplicate_loop_p(const struct loop *, const_edge); struct loop * slpeel_tree_duplicate_loop_to_edge_cfg(struct loop *, struct loop *, edge); struct loop * vect_loop_versioning(loop_vec_info, unsigned int, bool, poly_uint64); extern struct loop * vect_do_peeling(loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool); extern void vect_prepare_for_masked_peels(loop_vec_info); extern dump_user_location_t find_loop_location(struct loop *); extern bool vect_can_advance_ivs_p(loop_vec_info); /* In tree-vect-stmts.c. */ extern poly_uint64 current_vector_size; extern tree get_vectype_for_scalar_type(tree); extern tree get_vectype_for_scalar_type_and_size(tree, poly_uint64); extern tree get_mask_type_for_scalar_type(tree); extern tree get_same_sized_vectype(tree, tree); extern bool vect_get_loop_mask_type(loop_vec_info); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool vect_is_simple_use(tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info * = NULL, gimple ** = NULL); extern bool supportable_widening_operation(enum tree_code, stmt_vec_info, tree, tree, enum tree_code *, enum tree_code *, int *, vec < tree > *); extern bool supportable_narrowing_operation(enum tree_code, tree, tree, enum tree_code *, int *, vec < tree > *); extern unsigned record_stmt_cost(stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, int, enum vect_cost_model_location); extern stmt_vec_info vect_finish_replace_stmt(stmt_vec_info, gimple *); extern stmt_vec_info vect_finish_stmt_generation(stmt_vec_info, gimple *, gimple_stmt_iterator *); extern opt_result vect_mark_stmts_to_be_vectorized(loop_vec_info); extern tree vect_get_store_rhs(stmt_vec_info); extern tree vect_get_vec_def_for_operand_1(stmt_vec_info, enum vect_def_type); extern tree vect_get_vec_def_for_operand(tree, stmt_vec_info, tree = NULL); extern void vect_get_vec_defs(tree, tree, stmt_vec_info, vec < tree > *, vec < tree > *, slp_tree); extern void vect_get_vec_defs_for_stmt_copy(vec_info *, vec < tree > *, vec < tree > *); extern tree vect_init_vector(stmt_vec_info, tree, tree, gimple_stmt_iterator *); extern tree vect_get_vec_def_for_stmt_copy(vec_info *, tree); extern bool vect_transform_stmt(stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance); extern void vect_remove_stores(stmt_vec_info); extern opt_result vect_analyze_stmt(stmt_vec_info, bool *, slp_tree, 
slp_instance, stmt_vector_for_cost *); extern bool vectorizable_condition(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, bool, slp_tree, stmt_vector_for_cost *); extern bool vectorizable_shift(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern void vect_get_load_cost(stmt_vec_info, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool); extern void vect_get_store_cost(stmt_vec_info, int, unsigned int *, stmt_vector_for_cost *); extern bool vect_supportable_shift(enum tree_code, tree); extern tree vect_gen_perm_mask_any(tree, const vec_perm_indices &); extern tree vect_gen_perm_mask_checked(tree, const vec_perm_indices &); extern void optimize_mask_stores(struct loop *); extern gcall *vect_gen_while(tree, tree, tree); extern tree vect_gen_while_not(gimple_seq *, tree, tree, tree); extern opt_result vect_get_vector_types_for_stmt(stmt_vec_info, tree *, tree *); extern opt_tree vect_get_mask_type_for_stmt(stmt_vec_info); /* In tree-vect-data-refs.c. */ extern bool vect_can_force_dr_alignment_p(const_tree, poly_uint64); extern enum dr_alignment_support vect_supportable_dr_alignment (dr_vec_info *, bool); extern tree vect_get_smallest_scalar_type(stmt_vec_info, HOST_WIDE_INT *, HOST_WIDE_INT *); extern opt_result vect_analyze_data_ref_dependences(loop_vec_info, unsigned int *); extern bool vect_slp_analyze_instance_dependence(slp_instance); extern opt_result vect_enhance_data_refs_alignment(loop_vec_info); extern opt_result vect_analyze_data_refs_alignment(loop_vec_info); extern opt_result vect_verify_datarefs_alignment(loop_vec_info); extern bool vect_slp_analyze_and_verify_instance_alignment(slp_instance); extern opt_result vect_analyze_data_ref_accesses(vec_info *); extern opt_result vect_prune_runtime_alias_test_list(loop_vec_info); extern bool vect_gather_scatter_fn_p(bool, bool, tree, tree, unsigned int, signop, int, internal_fn *, tree *); extern bool vect_check_gather_scatter(stmt_vec_info, loop_vec_info, gather_scatter_info *); extern opt_result vect_find_stmt_data_reference(loop_p, gimple *, vec < data_reference_p > *); extern opt_result vect_analyze_data_refs(vec_info *, poly_uint64 *); extern void vect_record_base_alignments(vec_info *); extern tree vect_create_data_ref_ptr(stmt_vec_info, tree, struct loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree = NULL_TREE, tree = NULL_TREE); extern tree bump_vector_ptr(tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree); extern void vect_copy_ref_info(tree, tree); extern tree vect_create_destination_var(tree, tree); extern bool vect_grouped_store_supported(tree, unsigned HOST_WIDE_INT); extern bool vect_store_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern bool vect_grouped_load_supported(tree, bool, unsigned HOST_WIDE_INT); extern bool vect_load_lanes_supported(tree, unsigned HOST_WIDE_INT, bool); extern void vect_permute_store_chain(vec < tree >, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec < tree > *); extern tree vect_setup_realignment(stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, struct loop **); extern void vect_transform_grouped_load(stmt_vec_info, vec < tree >, int, gimple_stmt_iterator *); extern void vect_record_grouped_load_vectors(stmt_vec_info, vec < tree >); extern tree vect_get_new_vect_var(tree, enum vect_var_kind, const char *); extern tree vect_get_new_ssa_name(tree, enum vect_var_kind, const char * = NULL); extern tree
vect_create_addr_base_for_vector_ref(stmt_vec_info, gimple_seq *, tree, tree = NULL_TREE); /* In tree-vect-loop.c. */ /* FORNOW: Used in tree-parloops.c. */ extern stmt_vec_info vect_force_simple_reduction(loop_vec_info, stmt_vec_info, bool *, bool); /* Used in gimple-loop-interchange.c. */ extern bool check_reduction_path(dump_user_location_t, loop_p, gphi *, tree, enum tree_code); /* Drive for loop analysis stage. */ extern opt_loop_vec_info vect_analyze_loop(struct loop *, loop_vec_info, vec_info_shared *); extern tree vect_build_loop_niters(loop_vec_info, bool * = NULL); extern void vect_gen_vector_loop_niters(loop_vec_info, tree, tree *, tree *, bool); extern tree vect_halve_mask_nunits(tree); extern tree vect_double_mask_nunits(tree); extern void vect_record_loop_mask(loop_vec_info, vec_loop_masks *, unsigned int, tree); extern tree vect_get_loop_mask(gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int); /* Drive for loop transformation stage. */ extern struct loop *vect_transform_loop(loop_vec_info); extern opt_loop_vec_info vect_analyze_loop_form(struct loop *, vec_info_shared *); extern bool vectorizable_live_operation(stmt_vec_info, gimple_stmt_iterator *, slp_tree, int, stmt_vec_info *, stmt_vector_for_cost *); extern bool vectorizable_reduction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, slp_instance, stmt_vector_for_cost *); extern bool vectorizable_induction(stmt_vec_info, gimple_stmt_iterator *, stmt_vec_info *, slp_tree, stmt_vector_for_cost *); extern tree get_initial_def_for_reduction(stmt_vec_info, tree, tree *); extern bool vect_worthwhile_without_simd_p(vec_info *, tree_code); extern int vect_get_known_peeling_cost(loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *); extern tree cse_and_gimplify_to_preheader(loop_vec_info, tree); /* In tree-vect-slp.c. */ extern void vect_free_slp_instance(slp_instance, bool); extern bool vect_transform_slp_perm_load(slp_tree, vec < tree >, gimple_stmt_iterator *, poly_uint64, slp_instance, bool, unsigned *); extern bool vect_slp_analyze_operations(vec_info *); extern void vect_schedule_slp(vec_info *); extern opt_result vect_analyze_slp(vec_info *, unsigned); extern bool vect_make_slp_decision(loop_vec_info); extern void vect_detect_hybrid_slp(loop_vec_info); extern void vect_get_slp_defs(vec < tree >, slp_tree, vec < vec < tree > >*); extern bool vect_slp_bb(basic_block); extern stmt_vec_info vect_find_last_scalar_stmt_in_slp(slp_tree); extern bool is_simple_and_all_uses_invariant(stmt_vec_info, loop_vec_info); extern bool can_duplicate_and_interleave_p(unsigned int, machine_mode, unsigned int * = NULL, tree * = NULL, tree * = NULL); extern void duplicate_and_interleave(gimple_seq *, tree, vec < tree >, unsigned int, vec < tree > &); extern int vect_get_place_in_interleaving_chain(stmt_vec_info, stmt_vec_info); /* In tree-vect-patterns.c. */ /* * Pattern recognition functions. Additional pattern recognition functions * can (and will) be added in the future. */ void vect_pattern_recog(vec_info *); /* In tree-vectorizer.c. */ unsigned vectorize_loops(void); void vect_free_loop_info_assumptions(struct loop *); #endif /* GCC_TREE_VECTORIZER_H */
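To make the rgroup mask layout described in the header comment concrete, here is a minimal standalone C sketch, not GCC code and with all names invented for illustration, that materializes the masks from the worked f/d example: VF = 4 scalar iterations per vector iteration, of which only the first three are active. It repeats each per-iteration boolean nS times, then recovers d's (nS = 1, nL = 4) mask from f's (nS = 2, nL = 8) mask by keeping one lane of each equal pair, a scalar stand-in for the VIEW_CONVERT reuse the comment describes.

#include <stdio.h>

int main (void)
{
    const int VF = 4 ;          /* vectorization factor */
    const int n_active = 3 ;    /* active scalar iterations in this vector iteration */
    int f_mask [8] ;            /* f rgroup: nS = 2, nV = 1, nL = 8 */
    int d_mask [4] ;            /* d rgroup: nS = 1, nV = 1, nL = 4 */

    /* each per-iteration boolean is repeated nS (= 2) times */
    for (int i = 0 ; i < VF ; i++)
    {
        for (int j = 0 ; j < 2 ; j++)
        {
            f_mask [i * 2 + j] = (i < n_active) ;
        }
    }

    /* reuse f's mask for d: lanes 2i and 2i+1 agree, so keeping one of
       each pair yields the (nS/2, nL/2) mask */
    for (int i = 0 ; i < VF ; i++)
    {
        d_mask [i] = f_mask [2 * i] ;
    }

    for (int i = 0 ; i < 8 ; i++) printf ("%d ", f_mask [i]) ;  /* 1 1 1 1 1 1 0 0 */
    printf ("\n") ;
    for (int i = 0 ; i < 4 ; i++) printf ("%d ", d_mask [i]) ;  /* 1 1 1 0 */
    printf ("\n") ;
    return (0) ;
}

The printed masks match the ones shown in the comment; going the other way is impossible because d's mask lacks the pairwise duplication that f's rgroup requires, which is why a mask computed for the highest nS per nV serves every rgroup with that nV.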
GB_sort_template.c
//------------------------------------------------------------------------------ // GB_sort_template: sort all vectors in a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // macros: // GB_SORT (func) defined as GB_sort_func_TYPE_ascend or _descend, // GB_msort_ISO_ascend or _descend, // or GB_msort_func_UDT // GB_TYPE bool, int8_, ... or GB_void for UDT or ISO // GB_ADDR(A,p) A+p for builtin, A + p * GB_SIZE otherwise // GB_SIZE size of each entry: sizeof (GB_TYPE) for built-in // GB_GET(x,X,i) x = X [i] for built-in, memcpy for UDT // GB_COPY(A,i,C,k) A[i] = C [k] // GB_SWAP(A,i,k) swap A[i] and A[k] // GB_LT compare two entries, x < y, or x > y for descending sort //------------------------------------------------------------------------------ // GB_SORT (partition): use a pivot to partition an array //------------------------------------------------------------------------------ // C. A. R. Hoare partition method, partitions an array in-place via a pivot. // k = partition (A, n) partitions A [0:n-1] such that all entries in // A [0:k] are <= all entries in A [k+1:n-1]. static inline int64_t GB_SORT (partition) ( GB_TYPE *restrict A_0, // size n arrays to partition int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to partition uint64_t *seed // random number seed, modified on output #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { // select a pivot at random int64_t pivot = ((n < GB_RAND_MAX) ? GB_rand15 (seed) : GB_rand (seed)) % n ; // Pivot = A [pivot] GB_GET (Pivot0, A_0, pivot) ; // Pivot0 = A_0 [pivot] int64_t Pivot1 = A_1 [pivot] ; // At the top of the while loop, A [left+1...right-1] is considered, and // entries outside this range are in their proper place and not touched. // Since the input specification of this function is to partition A // [0..n-1], left must start at -1 and right must start at n. int64_t left = -1 ; int64_t right = n ; // keep partitioning until the left and right sides meet while (true) { // loop invariant: A [0..left] < pivot and A [right..n-1] > Pivot, // so the region to be considered is A [left+1 ... right-1]. // increment left until finding an entry A [left] >= Pivot bool less ; do { left++ ; // a0 = A_0 [left] GB_GET (a0, A_0, left) ; // less = (a0, A_1 [left]) < (Pivot0, Pivot1) GB_LT (less, a0, A_1 [left], Pivot0, Pivot1) ; } while (less) ; // decrement right until finding an entry A [right] <= Pivot do { right-- ; // a1 = A_0 [right] GB_GET (a1, A_0, right) ; // less = (Pivot0, Pivot1) < (a1, A_1 [right]) GB_LT (less, Pivot0, Pivot1, a1, A_1 [right]) ; } while (less) ; // now A [0..left-1] < pivot and A [right+1..n-1] > pivot, but // A [left] > pivot and A [right] < pivot, so these two entries // are out of place and must be swapped. // However, if the two sides have met, the partition is finished. if (left >= right) { // A has been partitioned into A [0:right] and A [right+1:n-1]. // k = right+1, so A is split into A [0:k-1] and A [k:n-1].
return (right + 1) ; } // since A [left] > pivot and A [right] < pivot, swap them GB_SWAP (A_0, left, right) ; int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ; // after the swap this condition holds: // A [0..left] < pivot and A [right..n-1] > pivot } } //------------------------------------------------------------------------------ // GB_SORT (quicksort): recursive single-threaded quicksort //------------------------------------------------------------------------------ static void GB_SORT (quicksort) // sort A [0:n-1] ( GB_TYPE *restrict A_0, // size n arrays to sort int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to sort uint64_t *seed // random number seed #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { if (n < 20) { // in-place insertion sort on A [0:n-1], where n is small for (int64_t k = 1 ; k < n ; k++) { for (int64_t j = k ; j > 0 ; j--) { // a0 = A_0 [j] GB_GET (a0, A_0, j) ; // a1 = A_0 [j-1] GB_GET (a1, A_0, j-1) ; // break if A [j] >= A [j-1] bool less ; // less = (a0, A_1 [j]) < (a1, A_1 [j-1]) GB_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ; if (!less) break ; // swap A [j-1] and A [j] GB_SWAP (A_0, j-1, j) ; int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ; } } } else { // partition A [0:n-1] into A [0:k-1] and A [k:n-1] int64_t k = GB_SORT (partition) (A_0, A_1, n, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort each partition // sort A [0:k-1] GB_SORT (quicksort) (A_0, A_1, k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort A [k:n-1] GB_SORT (quicksort) (GB_ADDR (A_0, k), A_1 + k, n-k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (binary_search): binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
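// Worked example (editor's illustration, ignoring the A_1 tie-breaker): with
// scalar keys X = [10, 20, 30, 40] and an ascending sort, searching for
// Pivot = 25 returns pleft = 2, since X [0:1] < Pivot and X [2:3] > Pivot.
// Searching for Pivot = 30 also returns pleft = 2, with X [0:1] <= Pivot and
// X [2:3] >= Pivot.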
static int64_t GB_SORT (binary_search) // return pleft ( const GB_TYPE *restrict Y_0, // Pivot is Y [pivot] const int64_t *restrict Y_1, const int64_t pivot, const GB_TYPE *restrict X_0, // search in X [p_start..p_end_-1] const int64_t *restrict X_1, const int64_t p_start, const int64_t p_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // find where the Pivot appears in X //-------------------------------------------------------------------------- // binary search of X [p_start...p_end-1] for the Pivot int64_t pleft = p_start ; int64_t pright = p_end - 1 ; GB_GET (Pivot0, Y_0, pivot) ; // Pivot0 = Y_0 [pivot] int64_t Pivot1 = Y_1 [pivot] ; bool less ; while (pleft < pright) { int64_t pmiddle = (pleft + pright) >> 1 ; // x0 = X_0 [pmiddle] GB_GET (x0, X_0, pmiddle) ; // less = (x0, X_1 [pmiddle]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pmiddle], Pivot0, Pivot1) ; pleft = less ? (pmiddle+1) : pleft ; pright = less ? pright : pmiddle ; } // binary search is narrowed down to a single item // or it has found the list is empty: ASSERT (pleft == pright || pleft == pright + 1) ; // If found is true then X [pleft == pright] == Pivot. If duplicates // appear then X [pleft] is any one of the entries equal to the Pivot // in the list. If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft+1 ... p_end-1] > Pivot holds. // The value X [pleft] may be either < or > Pivot. bool found = (pleft == pright) && (X_1 [pleft] == Pivot1) ; // Modify pleft and pright: if (!found && (pleft == pright)) { // x0 = X_0 [pleft] GB_GET (x0, X_0, pleft) ; // less = (x0, X_1 [pleft]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pleft], Pivot0, Pivot1) ; if (less) { pleft++ ; } else { // pright++ ; // (not needed) } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- // If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] > Pivot holds, // and pleft-1 == pright // If X has no duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] >= Pivot holds. // If X has duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. return (pleft) ; } //------------------------------------------------------------------------------ // GB_SORT (create_merge_tasks) //------------------------------------------------------------------------------ // Recursively constructs ntasks tasks to merge two arrays, Left and Right, // into Sresult, where Left is L [pL_start...pL_end-1], Right is R // [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1], // and where total_work is the total size of Left and Right. // // Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and // R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output // array S [S_task [tid] ... ]. The task tids created are t0 to // t0+ntasks-1. 
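// Illustrative example (editor's note, with assumed sizes): to merge a Left
// array of size 6 with a Right array of size 2 using ntasks = 2, Left is cut
// at its midpoint (3 entries on each side) and the matching cut of Right is
// found by binary search.  If that search places 1 entry of Right in the
// first half, then work0 = 3 + 1 = 4 of the total_work = 8 entries go to the
// first task and the remaining 4 to the second, balancing the two merges.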
static void GB_SORT (create_merge_tasks) ( // output: int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed // input: const int t0, // first task tid to create const int ntasks, // # of tasks to create const int64_t pS_start, // merge into S [pS_start...] const GB_TYPE *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t *restrict L_1, const int64_t pL_start, const int64_t pL_end, const GB_TYPE *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t *restrict R_1, const int64_t pR_start, const int64_t pR_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_SORT (binary_search) ( L_0, L_1, pleft, R_0, R_1, pR_start, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_SORT (binary_search) ( R_0, R_1, pright, L_0, L_1, pL_start, pL_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. 
GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, L_1, pL_start, pleft, R_0, R_1, pR_start, pright #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, L_1, pleft, pL_end, R_0, R_1, pright, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (merge): merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_SORT (merge) ( GB_TYPE *restrict S_0, // output of length nleft + nright int64_t *restrict S_1, const GB_TYPE *restrict Left_0, // left input of length nleft const int64_t *restrict Left_1, const int64_t nleft, const GB_TYPE *restrict Right_0, // right input of length nright const int64_t *restrict Right_1, const int64_t nright #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { // left0 = Left_0 [pleft] GB_GET (left0, Left_0, pleft) ; // right0 = Right_0 [pright] GB_GET (right0, Right_0, pright) ; bool less ; // less = (left0, Left_1 [pleft]) < (right0, Right_1 [pright]) GB_LT (less, left0, Left_1 [pleft], right0, Right_1 [pright]) ; if (less) { // S [p] = Left [pleft++] GB_COPY (S_0, p, Left_0, pleft) ; S_1 [p] = Left_1 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] GB_COPY (S_0, p, Right_0, pright) ; S_1 [p] = Right_1 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Left_0, pleft), nremaining * GB_SIZE) ; memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Right_0, pright), nremaining * GB_SIZE) ; memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_SORT (vector) parallel mergesort of a single vector //------------------------------------------------------------------------------ static void GB_SORT (vector) // sort the pair of arrays A_0, A_1 ( GB_TYPE *restrict A_0, // size n array int64_t *restrict A_1, // size n array GB_TYPE *restrict W_0, // workspace of size n * GB_SIZE bytes int64_t *restrict W, // int64_t workspace of size n+6*ntasks+1 const int64_t n, const int kk, const int ntasks, const int nthreads // # of threads to use #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , 
      GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
#endif
)
{

    //--------------------------------------------------------------------------
    // split up workspace
    //--------------------------------------------------------------------------

    ASSERT (nthreads > 2 && n >= GB_BASECASE) ;
    int64_t *T = W ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        uint64_t seed = tid ;
        GB_SORT (quicksort) (GB_ADDR (A_0, leaf), A_1 + leaf, leafsize, &seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    int nt = 1 ;
    for (int k = kk ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks were large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, Slice [tid],    Slice [tid+nt],
                A_0, A_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            GB_SORT (merge) (
                GB_ADDR (W_0, pS), W_1 + pS,
                GB_ADDR (A_0, pL), A_1 + pL, nL,
                GB_ADDR (A_0, pR), A_1 + pR, nR
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks were large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, Slice [tid],    Slice [tid+nt],
                W_0, W_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (A_0, pS), A_1 + pS, GB_ADDR (W_0, pL), W_1 + pL, nL, GB_ADDR (W_0, pR), W_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; } } //------------------------------------------------------------------------------ // sort all vectors in a matrix //------------------------------------------------------------------------------ #undef GB_FREE_WORKSPACE #define GB_FREE_WORKSPACE \ { \ GB_WERK_POP (Werk, int64_t) ; \ GB_FREE_WORK (&C_skipped, C_skipped_size) ; \ GB_FREE_WORK (&W_0, W_0_size) ; \ GB_FREE_WORK (&W, W_size) ; \ } static GrB_Info GB_SORT (matrix) ( GrB_Matrix C, // matrix sorted in-place #if GB_SORT_UDT GrB_BinaryOp op, // comparator for user-defined types only #endif GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C to sort", GB0) ; ASSERT (GB_JUMBLED_OK (C)) ; ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; #if GB_SORT_UDT ASSERT_BINARYOP_OK (op, "op", GB0) ; ASSERT (op->ztype == GrB_BOOL) ; ASSERT (op->xtype == op->ytype) ; #endif int64_t cnz = GB_nnz (C) ; if (C->iso || cnz <= 1) { // nothing to do return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get input //-------------------------------------------------------------------------- int64_t cnvec = C->nvec ; int64_t *restrict Cp = C->p ; int64_t *restrict Ci = C->i ; GB_TYPE *restrict Cx = (GB_TYPE *) C->x ; // workspace GB_TYPE *restrict W_0 = NULL ; size_t W_0_size = 0 ; int64_t *restrict W = NULL ; size_t W_size = 0 ; int64_t *restrict C_skipped = NULL ; size_t C_skipped_size = 0 ; GB_WERK_DECLARE (Werk, int64_t) ; #if GB_SORT_UDT // get typesize, and function pointers for operators and typecasting GrB_Type ctype = C->type ; size_t csize = ctype->size ; size_t xsize = op->xtype->size ; GxB_binary_function flt = op->binop_function ; GB_cast_function fcast = GB_cast_factory (op->xtype->code, ctype->code) ; #endif //========================================================================== // phase1: sort all short vectors //========================================================================== // slice the C matrix into tasks for phase 1 GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (32 * nthreads) ; ntasks = GB_IMIN (ntasks, cnvec) ; ntasks = GB_IMAX (ntasks, 1) ; // printf ("phase1: threads %d tasks %d\n", nthreads, ntasks) ; GB_WERK_PUSH (Werk, 3*ntasks + 2, int64_t) ; if (Werk == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict C_max = Werk ; // size ntasks int64_t *restrict C_skip = Werk + ntasks ; // size ntasks+1 int64_t *restrict C_slice = Werk + 2*ntasks + 1; // size ntasks+1 GB_pslice (C_slice, Cp, cnvec, ntasks, false) ; // sort all short vectors in parallel, one thread per vector int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t task_max_length = 0 ; int64_t n_skipped = 0 ; for (int64_t k = kfirst ; k < klast ; k++) { // sort the vector C(:,k), unless it is too long const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz <= GB_BASECASE || nthreads == 1) { // printf ("\n------------sort: %ld cknz %ld\n", k, cknz) ; uint64_t seed = k ; GB_SORT (quicksort) (GB_ADDR (Cx, pC_start), Ci + pC_start, cknz, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // printf ("\n------------skip: %ld cknz %ld\n", k, cknz) ; n_skipped++ ; } task_max_length = GB_IMAX (task_max_length, cknz) ; } C_max [tid] = task_max_length ; C_skip [tid] = n_skipped ; } // find max vector length and return if all vectors are now sorted int64_t max_length = 0 ; for (tid = 0 ; tid < ntasks ; tid++) { max_length = GB_IMAX (max_length, C_max [tid]) ; } if (max_length <= GB_BASECASE || nthreads == 1) { // all vectors are sorted GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; } //========================================================================== // phase2: sort all long vectors in parallel //========================================================================== //-------------------------------------------------------------------------- // construct a list of vectors that must still be sorted //-------------------------------------------------------------------------- GB_cumsum (C_skip, ntasks, NULL, 1, Context) ; int64_t total_skipped = C_skip [ntasks] ; C_skipped = GB_MALLOC_WORK (total_skipped, int64_t, &C_skipped_size) ; if (C_skipped == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t n_skipped = C_skip [tid] ; for (int64_t k = kfirst ; k < klast ; k++) { const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz > GB_BASECASE) { // C(:,k) was not sorted C_skipped [n_skipped++] = k ; } } } //-------------------------------------------------------------------------- // determine # of tasks for each vector in phase 2 //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 quicksort leaves // 5 to 16 threads: 6 levels, 64 quicksort leaves // 17 to 64 threads: 8 levels, 256 quicksort leaves // 65 to 256 threads: 10 levels, 1024 quicksort leaves // 256 to 1024 threads: 12 levels, 4096 quicksort leaves // ... 
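    // Worked example (editor's note): for nthreads = 8,
    // kk = 2 + 2*ceil(log2(8)/2) = 2 + 2*ceil(1.5) = 6 levels, so
    // ntasks2 = 2^6 = 64 quicksort leaves, which lies in the target range
    // 4*nthreads = 32 to 16*nthreads = 128 given in the table above.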
int kk = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks2 = 1 << kk ; // printf ("phase2: threads %d tasks %d skipped %ld\n", nthreads, ntasks2, // total_skipped) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- W = GB_MALLOC_WORK (max_length + 6*ntasks2 + 1, int64_t, &W_size) ; W_0 = (GB_TYPE *) GB_MALLOC_WORK (max_length * GB_SIZE, GB_void, &W_0_size) ; if (W == NULL || W_0 == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // sort each long vector using all available threads //-------------------------------------------------------------------------- for (int64_t t = 0 ; t < total_skipped ; t++) { const int64_t k = C_skipped [t] ; const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; ASSERT (cknz > GB_BASECASE) ; GB_SORT (vector) (GB_ADDR (Cx, pC_start), Ci + pC_start, W_0, W, cknz, kk, ntasks2, nthreads #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; C->jumbled = true ; ASSERT_MATRIX_OK (C, "C sorted by value", GB0) ; return (GrB_SUCCESS) ; } #undef GB_SORT #undef GB_TYPE
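//------------------------------------------------------------------------------
// Editor's sketch (not part of SuiteSparse:GraphBLAS): one way the macro layer
// above might be instantiated for a built-in type.  The DEMO_* names are
// hypothetical stand-ins for GB_GET, GB_SWAP, and GB_LT, and the driver uses a
// plain insertion sort instead of the template's quicksort; it only
// demonstrates the (value, index) tuple ordering that GB_LT encodes, where
// ties in the value are broken by the original position A_1 [i].
//------------------------------------------------------------------------------

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TYPE int32_t
#define DEMO_GET(x,X,i)     DEMO_TYPE x = X [i]
#define DEMO_SWAP(A,i,k)    { DEMO_TYPE t = A [i] ; A [i] = A [k] ; A [k] = t ; }
// ascending comparison of (x, xi) and (y, yi) tuples, as in GB_LT
#define DEMO_LT(less,x,xi,y,yi)                                             \
    less = ((x) < (y)) ? true : (((x) == (y)) ? ((xi) < (yi)) : false)

static void demo_sort (DEMO_TYPE *A_0, int64_t *A_1, int64_t n)
{
    // insertion sort of the pair of arrays (A_0, A_1) on (value, index)
    for (int64_t k = 1 ; k < n ; k++)
    {
        for (int64_t j = k ; j > 0 ; j--)
        {
            DEMO_GET (a0, A_0, j) ;
            DEMO_GET (a1, A_0, j-1) ;
            bool less ;
            DEMO_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ;
            if (!less) break ;
            DEMO_SWAP (A_0, j-1, j) ;
            int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ;
        }
    }
}

int main (void)
{
    DEMO_TYPE A_0 [4] = { 3, 1, 3, 2 } ;
    int64_t   A_1 [4] = { 0, 1, 2, 3 } ;
    demo_sort (A_0, A_1, 4) ;
    for (int i = 0 ; i < 4 ; i++)
    {
        printf ("%d (index %d)\n", (int) A_0 [i], (int) A_1 [i]) ;
    }
    // prints 1 (index 1), 2 (index 3), 3 (index 0), 3 (index 2):
    // equal values keep their original relative order
    return (0) ;
}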
//------------------------------------------------------------------------------
// GB_sort_template: sort all vectors in a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// macros:

// GB_SORT (func)      defined as GB_sort_func_TYPE_ascend or _descend,
//                     GB_msort_ISO_ascend or _descend,
//                     or GB_msort_func_UDT
// GB_TYPE             bool, int8_t, ... or GB_void for UDT or ISO
// GB_ADDR(A,p)        A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE             size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i)       x = X [i] for built-in, memcpy for UDT
// GB_COPY(A,i,C,k)    A[i] = C [k]
// GB_SWAP(A,i,k)      swap A[i] and A[k]
// GB_LT               compare two entries, x < y, or x > y for descending sort

//------------------------------------------------------------------------------
// GB_SORT (partition): use a pivot to partition an array
//------------------------------------------------------------------------------

// C. A. R. Hoare's partition method: partitions an array in place via a pivot.
// k = partition (A, n) partitions A [0:n-1] such that all entries in
// A [0:k] are <= all entries in A [k+1:n-1].

static inline int64_t GB_SORT (partition)
(
    GB_TYPE *restrict A_0,      // size n arrays to partition
    int64_t *restrict A_1,      // size n array
    const int64_t n,            // size of the array(s) to partition
    uint64_t *seed              // random number seed, modified on output
#if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
#endif
)
{

    // select a pivot at random
    int64_t pivot = ((n < GB_RAND_MAX) ? GB_rand15 (seed) : GB_rand (seed)) % n;

    // Pivot = A [pivot]
    GB_GET (Pivot0, A_0, pivot) ;       // Pivot0 = A_0 [pivot]
    int64_t Pivot1 = A_1 [pivot] ;

    // At the top of the while loop, A [left+1...right-1] is considered, and
    // entries outside this range are in their proper place and not touched.
    // Since the input specification of this function is to partition A
    // [0..n-1], left must start at -1 and right must start at n.
    int64_t left = -1 ;
    int64_t right = n ;

    // keep partitioning until the left and right sides meet
    while (true)
    {
        // loop invariant: A [0..left] < Pivot and A [right..n-1] > Pivot,
        // so the region to be considered is A [left+1 ... right-1].

        // increment left until finding an entry A [left] >= Pivot
        bool less ;
        do
        {
            left++ ;
            // a0 = A_0 [left]
            GB_GET (a0, A_0, left) ;
            // less = (a0, A_1 [left]) < (Pivot0, Pivot1)
            GB_LT (less, a0, A_1 [left], Pivot0, Pivot1) ;
        }
        while (less) ;

        // decrement right until finding an entry A [right] <= Pivot
        do
        {
            right-- ;
            // a1 = A_0 [right]
            GB_GET (a1, A_0, right) ;
            // less = (Pivot0, Pivot1) < (a1, A_1 [right])
            GB_LT (less, Pivot0, Pivot1, a1, A_1 [right]) ;
        }
        while (less) ;

        // now A [0..left-1] < Pivot and A [right+1..n-1] > Pivot, but
        // A [left] > Pivot and A [right] < Pivot, so these two entries
        // are out of place and must be swapped.

        // However, if the two sides have met, the partition is finished.
        if (left >= right)
        {
            // A has been partitioned into A [0:right] and A [right+1:n-1].
            // k = right+1, so A is split into A [0:k-1] and A [k:n-1].
return (right + 1) ; } // since A [left] > pivot and A [right] < pivot, swap them GB_SWAP (A_0, left, right) ; int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ; // after the swap this condition holds: // A [0..left] < pivot and A [right..n-1] > pivot } } //------------------------------------------------------------------------------ // GB_SORT (quicksort): recursive single-threaded quicksort //------------------------------------------------------------------------------ static void GB_SORT (quicksort) // sort A [0:n-1] ( GB_TYPE *restrict A_0, // size n arrays to sort int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to sort uint64_t *seed // random number seed #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { if (n < 20) { // in-place insertion sort on A [0:n-1], where n is small for (int64_t k = 1 ; k < n ; k++) { for (int64_t j = k ; j > 0 ; j--) { // a0 = A_0 [j] GB_GET (a0, A_0, j) ; // a1 = A_0 [j-1] GB_GET (a1, A_0, j-1) ; // break if A [j] >= A [j-1] bool less ; // less = (a0, A_1 [j]) < (a1, A_1 [j-1]) GB_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ; if (!less) break ; // swap A [j-1] and A [j] GB_SWAP (A_0, j-1, j) ; int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ; } } } else { // partition A [0:n-1] into A [0:k-1] and A [k:n-1] int64_t k = GB_SORT (partition) (A_0, A_1, n, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort each partition // sort A [0:k-1] GB_SORT (quicksort) (A_0, A_1, k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort A [k:n-1] GB_SORT (quicksort) (GB_ADDR (A_0, k), A_1 + k, n-k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (binary_search): binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
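// Additional illustration (editor's note, ignoring the A_1 tie-breaker): with
// duplicates, X = [10, 20, 20, 20, 30] and Pivot = 20 gives pleft = 1 here,
// but any position among the equal entries satisfies the stated guarantee
// X [p_start ... pleft-1] <= Pivot and X [pleft ... p_end-1] >= Pivot.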
static int64_t GB_SORT (binary_search) // return pleft ( const GB_TYPE *restrict Y_0, // Pivot is Y [pivot] const int64_t *restrict Y_1, const int64_t pivot, const GB_TYPE *restrict X_0, // search in X [p_start..p_end_-1] const int64_t *restrict X_1, const int64_t p_start, const int64_t p_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // find where the Pivot appears in X //-------------------------------------------------------------------------- // binary search of X [p_start...p_end-1] for the Pivot int64_t pleft = p_start ; int64_t pright = p_end - 1 ; GB_GET (Pivot0, Y_0, pivot) ; // Pivot0 = Y_0 [pivot] int64_t Pivot1 = Y_1 [pivot] ; bool less ; while (pleft < pright) { int64_t pmiddle = (pleft + pright) >> 1 ; // x0 = X_0 [pmiddle] GB_GET (x0, X_0, pmiddle) ; // less = (x0, X_1 [pmiddle]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pmiddle], Pivot0, Pivot1) ; pleft = less ? (pmiddle+1) : pleft ; pright = less ? pright : pmiddle ; } // binary search is narrowed down to a single item // or it has found the list is empty: ASSERT (pleft == pright || pleft == pright + 1) ; // If found is true then X [pleft == pright] == Pivot. If duplicates // appear then X [pleft] is any one of the entries equal to the Pivot // in the list. If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft+1 ... p_end-1] > Pivot holds. // The value X [pleft] may be either < or > Pivot. bool found = (pleft == pright) && (X_1 [pleft] == Pivot1) ; // Modify pleft and pright: if (!found && (pleft == pright)) { // x0 = X_0 [pleft] GB_GET (x0, X_0, pleft) ; // less = (x0, X_1 [pleft]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pleft], Pivot0, Pivot1) ; if (less) { pleft++ ; } else { // pright++ ; // (not needed) } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- // If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] > Pivot holds, // and pleft-1 == pright // If X has no duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] >= Pivot holds. // If X has duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. return (pleft) ; } //------------------------------------------------------------------------------ // GB_SORT (create_merge_tasks) //------------------------------------------------------------------------------ // Recursively constructs ntasks tasks to merge two arrays, Left and Right, // into Sresult, where Left is L [pL_start...pL_end-1], Right is R // [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1], // and where total_work is the total size of Left and Right. // // Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and // R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output // array S [S_task [tid] ... ]. The task tids created are t0 to // t0+ntasks-1. 
static void GB_SORT (create_merge_tasks) ( // output: int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed // input: const int t0, // first task tid to create const int ntasks, // # of tasks to create const int64_t pS_start, // merge into S [pS_start...] const GB_TYPE *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t *restrict L_1, const int64_t pL_start, const int64_t pL_end, const GB_TYPE *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t *restrict R_1, const int64_t pR_start, const int64_t pR_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_SORT (binary_search) ( L_0, L_1, pleft, R_0, R_1, pR_start, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_SORT (binary_search) ( R_0, R_1, pright, L_0, L_1, pL_start, pL_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. 
GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, L_1, pL_start, pleft, R_0, R_1, pR_start, pright #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, L_1, pleft, pL_end, R_0, R_1, pright, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (merge): merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_SORT (merge) ( GB_TYPE *restrict S_0, // output of length nleft + nright int64_t *restrict S_1, const GB_TYPE *restrict Left_0, // left input of length nleft const int64_t *restrict Left_1, const int64_t nleft, const GB_TYPE *restrict Right_0, // right input of length nright const int64_t *restrict Right_1, const int64_t nright #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { // left0 = Left_0 [pleft] GB_GET (left0, Left_0, pleft) ; // right0 = Right_0 [pright] GB_GET (right0, Right_0, pright) ; bool less ; // less = (left0, Left_1 [pleft]) < (right0, Right_1 [pright]) GB_LT (less, left0, Left_1 [pleft], right0, Right_1 [pright]) ; if (less) { // S [p] = Left [pleft++] GB_COPY (S_0, p, Left_0, pleft) ; S_1 [p] = Left_1 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] GB_COPY (S_0, p, Right_0, pright) ; S_1 [p] = Right_1 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Left_0, pleft), nremaining * GB_SIZE) ; memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Right_0, pright), nremaining * GB_SIZE) ; memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_SORT (vector) parallel mergesort of a single vector //------------------------------------------------------------------------------ static void GB_SORT (vector) // sort the pair of arrays A_0, A_1 ( GB_TYPE *restrict A_0, // size n array int64_t *restrict A_1, // size n array GB_TYPE *restrict W_0, // workspace of size n * GB_SIZE bytes int64_t *restrict W, // int64_t workspace of size n+6*ntasks+1 const int64_t n, const int kk, const int ntasks, const int nthreads // # of threads to use #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , 
      GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
#endif
)
{

    //--------------------------------------------------------------------------
    // split up workspace
    //--------------------------------------------------------------------------

    ASSERT (nthreads > 2 && n >= GB_BASECASE) ;
    int64_t *T = W ;
    int64_t *restrict W_1    = T ; T += n ;
    int64_t *restrict L_task = T ; T += ntasks ;
    int64_t *restrict L_len  = T ; T += ntasks ;
    int64_t *restrict R_task = T ; T += ntasks ;
    int64_t *restrict R_len  = T ; T += ntasks ;
    int64_t *restrict S_task = T ; T += ntasks ;
    int64_t *restrict Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    GB_eslice (Slice, n, ntasks) ;
    int tid ;
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        uint64_t seed = tid ;
        GB_SORT (quicksort) (GB_ADDR (A_0, leaf), A_1 + leaf, leafsize, &seed
            #if GB_SORT_UDT
            , csize, xsize, flt, fcast
            #endif
            ) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    int nt = 1 ;
    for (int k = kk ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // TODO: skip k and k-1 for each group of 4 sublists of A if they are
        // already sorted with respect to each other.

        // this could be done in parallel if ntasks were large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, Slice [tid],    Slice [tid+nt],
                A_0, A_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            GB_SORT (merge) (
                GB_ADDR (W_0, pS), W_1 + pS,
                GB_ADDR (A_0, pL), A_1 + pL, nL,
                GB_ADDR (A_0, pR), A_1 + pR, nR
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks were large
        for (tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            GB_SORT (create_merge_tasks) (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, Slice [tid],    Slice [tid+nt],
                W_0, W_1, Slice [tid+nt], Slice [tid+2*nt]
                #if GB_SORT_UDT
                , csize, xsize, flt, fcast
                #endif
                ) ;
        }

        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (A_0, pS), A_1 + pS, GB_ADDR (W_0, pL), W_1 + pL, nL, GB_ADDR (W_0, pR), W_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; } } //------------------------------------------------------------------------------ // sort all vectors in a matrix //------------------------------------------------------------------------------ #undef GB_FREE_WORKSPACE #define GB_FREE_WORKSPACE \ { \ GB_WERK_POP (Werk, int64_t) ; \ GB_FREE_WORK (&C_skipped, C_skipped_size) ; \ GB_FREE_WORK (&W_0, W_0_size) ; \ GB_FREE_WORK (&W, W_size) ; \ } static GrB_Info GB_SORT (matrix) ( GrB_Matrix C, // matrix sorted in-place #if GB_SORT_UDT GrB_BinaryOp op, // comparator for user-defined types only #endif GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C to sort", GB0) ; ASSERT (GB_JUMBLED_OK (C)) ; ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; #if GB_SORT_UDT ASSERT_BINARYOP_OK (op, "op", GB0) ; ASSERT (op->ztype == GrB_BOOL) ; ASSERT (op->xtype == op->ytype) ; #endif int64_t cnz = GB_nnz (C) ; if (C->iso || cnz <= 1) { // nothing to do return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get input //-------------------------------------------------------------------------- int64_t cnvec = C->nvec ; int64_t *restrict Cp = C->p ; int64_t *restrict Ci = C->i ; GB_TYPE *restrict Cx = (GB_TYPE *) C->x ; // workspace GB_TYPE *restrict W_0 = NULL ; size_t W_0_size = 0 ; int64_t *restrict W = NULL ; size_t W_size = 0 ; int64_t *restrict C_skipped = NULL ; size_t C_skipped_size = 0 ; GB_WERK_DECLARE (Werk, int64_t) ; #if GB_SORT_UDT // get typesize, and function pointers for operators and typecasting GrB_Type ctype = C->type ; size_t csize = ctype->size ; size_t xsize = op->xtype->size ; GxB_binary_function flt = op->binop_function ; GB_cast_function fcast = GB_cast_factory (op->xtype->code, ctype->code) ; #endif //========================================================================== // phase1: sort all short vectors //========================================================================== // slice the C matrix into tasks for phase 1 GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (32 * nthreads) ; ntasks = GB_IMIN (ntasks, cnvec) ; ntasks = GB_IMAX (ntasks, 1) ; // printf ("phase1: threads %d tasks %d\n", nthreads, ntasks) ; GB_WERK_PUSH (Werk, 3*ntasks + 2, int64_t) ; if (Werk == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict C_max = Werk ; // size ntasks int64_t *restrict C_skip = Werk + ntasks ; // size ntasks+1 int64_t *restrict C_slice = Werk + 2*ntasks + 1; // size ntasks+1 GB_pslice (C_slice, Cp, cnvec, ntasks, false) ; // sort all short vectors in parallel, one thread per vector int tid ; for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t task_max_length = 0 ; int64_t n_skipped = 0 ; for (int64_t k = kfirst ; k < klast ; k++) { // sort the vector C(:,k), unless it is too long const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz <= GB_BASECASE || nthreads == 1) { // printf ("\n------------sort: %ld cknz %ld\n", k, cknz) ; uint64_t seed = k ; GB_SORT (quicksort) (GB_ADDR (Cx, pC_start), Ci + pC_start, cknz, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // printf ("\n------------skip: %ld cknz %ld\n", k, cknz) ; n_skipped++ ; } task_max_length = GB_IMAX (task_max_length, cknz) ; } C_max [tid] = task_max_length ; C_skip [tid] = n_skipped ; } // find max vector length and return if all vectors are now sorted int64_t max_length = 0 ; for (tid = 0 ; tid < ntasks ; tid++) { max_length = GB_IMAX (max_length, C_max [tid]) ; } if (max_length <= GB_BASECASE || nthreads == 1) { // all vectors are sorted GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; } //========================================================================== // phase2: sort all long vectors in parallel //========================================================================== //-------------------------------------------------------------------------- // construct a list of vectors that must still be sorted //-------------------------------------------------------------------------- GB_cumsum (C_skip, ntasks, NULL, 1, Context) ; int64_t total_skipped = C_skip [ntasks] ; C_skipped = GB_MALLOC_WORK (total_skipped, int64_t, &C_skipped_size) ; if (C_skipped == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t n_skipped = C_skip [tid] ; for (int64_t k = kfirst ; k < klast ; k++) { const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz > GB_BASECASE) { // C(:,k) was not sorted C_skipped [n_skipped++] = k ; } } } //-------------------------------------------------------------------------- // determine # of tasks for each vector in phase 2 //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 quicksort leaves // 5 to 16 threads: 6 levels, 64 quicksort leaves // 17 to 64 threads: 8 levels, 256 quicksort leaves // 65 to 256 threads: 10 levels, 1024 quicksort leaves // 256 to 1024 threads: 12 levels, 4096 quicksort leaves // ... 
int kk = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks2 = 1 << kk ; // printf ("phase2: threads %d tasks %d skipped %ld\n", nthreads, ntasks2, // total_skipped) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- W = GB_MALLOC_WORK (max_length + 6*ntasks2 + 1, int64_t, &W_size) ; W_0 = (GB_TYPE *) GB_MALLOC_WORK (max_length * GB_SIZE, GB_void, &W_0_size) ; if (W == NULL || W_0 == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // sort each long vector using all available threads //-------------------------------------------------------------------------- for (int64_t t = 0 ; t < total_skipped ; t++) { const int64_t k = C_skipped [t] ; const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; ASSERT (cknz > GB_BASECASE) ; GB_SORT (vector) (GB_ADDR (Cx, pC_start), Ci + pC_start, W_0, W, cknz, kk, ntasks2, nthreads #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; C->jumbled = true ; ASSERT_MATRIX_OK (C, "C sorted by value", GB0) ; return (GrB_SUCCESS) ; } #undef GB_SORT #undef GB_TYPE
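//------------------------------------------------------------------------------
// Editor's sketch (not part of SuiteSparse:GraphBLAS): the split step at the
// heart of create_merge_tasks above, reduced to plain int64_t keys.  Two
// sorted arrays are divided into two independent merge subproblems by halving
// the larger array and binary-searching the smaller one for the matching
// split point; lower_bound is a hypothetical helper, not a library function.
//------------------------------------------------------------------------------

#include <stdint.h>
#include <stdio.h>

// smallest p in [lo, hi) with X [p] >= pivot (X sorted ascending); hi if none
static int64_t lower_bound (const int64_t *X, int64_t lo, int64_t hi,
    int64_t pivot)
{
    while (lo < hi)
    {
        int64_t mid = (lo + hi) >> 1 ;
        if (X [mid] < pivot) { lo = mid + 1 ; } else { hi = mid ; }
    }
    return (lo) ;
}

int main (void)
{
    int64_t L [6] = { 1, 4, 7, 9, 12, 15 } ;    // the larger array, nleft = 6
    int64_t R [2] = { 5, 10 } ;                 // the smaller array, nright = 2
    int64_t pleft  = 6 / 2 ;                    // halve the larger: L [3] = 9
    int64_t pright = lower_bound (R, 0, 2, L [pleft]) ;    // pright = 1
    // task 0 merges L [0:pleft-1] with R [0:pright-1]; task 1 merges the rest
    printf ("task 0: L [0:%d] and R [0:%d]\n", (int) (pleft-1), (int) (pright-1)) ;
    printf ("task 1: L [%d:5] and R [%d:1]\n", (int) pleft, (int) pright) ;
    // every entry in task 0's lists (1,4,5,7) is <= every entry in task 1's
    // lists (9,10,12,15), so the two merges are independent and their outputs
    // can simply be concatenated
    return (0) ;
}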
//------------------------------------------------------------------------------
// GB_sort_template: sort all vectors in a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// macros:

// GB_SORT (func)      defined as GB_sort_func_TYPE_ascend or _descend,
//                     GB_msort_ISO_ascend or _descend,
//                     or GB_msort_func_UDT
// GB_TYPE             bool, int8_t, ... or GB_void for UDT or ISO
// GB_ADDR(A,p)        A+p for builtin, A + p * GB_SIZE otherwise
// GB_SIZE             size of each entry: sizeof (GB_TYPE) for built-in
// GB_GET(x,X,i)       x = X [i] for built-in, memcpy for UDT
// GB_COPY(A,i,C,k)    A[i] = C [k]
// GB_SWAP(A,i,k)      swap A[i] and A[k]
// GB_LT               compare two entries, x < y, or x > y for descending sort

//------------------------------------------------------------------------------
// GB_SORT (partition): use a pivot to partition an array
//------------------------------------------------------------------------------

// C. A. R. Hoare's partition method: partitions an array in place via a pivot.
// k = partition (A, n) partitions A [0:n-1] such that all entries in
// A [0:k] are <= all entries in A [k+1:n-1].

static inline int64_t GB_SORT (partition)
(
    GB_TYPE *restrict A_0,      // size n arrays to partition
    int64_t *restrict A_1,      // size n array
    const int64_t n,            // size of the array(s) to partition
    uint64_t *seed              // random number seed, modified on output
#if GB_SORT_UDT
    , size_t csize              // size of GB_TYPE
    , size_t xsize              // size of op->xtype
    , GxB_binary_function flt   // function to test for < (ascend), > (descend)
    , GB_cast_function fcast    // cast entry to inputs of flt
#endif
)
{

    // select a pivot at random
    int64_t pivot = ((n < GB_RAND_MAX) ? GB_rand15 (seed) : GB_rand (seed)) % n;

    // Pivot = A [pivot]
    GB_GET (Pivot0, A_0, pivot) ;       // Pivot0 = A_0 [pivot]
    int64_t Pivot1 = A_1 [pivot] ;

    // At the top of the while loop, A [left+1...right-1] is considered, and
    // entries outside this range are in their proper place and not touched.
    // Since the input specification of this function is to partition A
    // [0..n-1], left must start at -1 and right must start at n.
    int64_t left = -1 ;
    int64_t right = n ;

    // keep partitioning until the left and right sides meet
    while (true)
    {
        // loop invariant: A [0..left] < Pivot and A [right..n-1] > Pivot,
        // so the region to be considered is A [left+1 ... right-1].

        // increment left until finding an entry A [left] >= Pivot
        bool less ;
        do
        {
            left++ ;
            // a0 = A_0 [left]
            GB_GET (a0, A_0, left) ;
            // less = (a0, A_1 [left]) < (Pivot0, Pivot1)
            GB_LT (less, a0, A_1 [left], Pivot0, Pivot1) ;
        }
        while (less) ;

        // decrement right until finding an entry A [right] <= Pivot
        do
        {
            right-- ;
            // a1 = A_0 [right]
            GB_GET (a1, A_0, right) ;
            // less = (Pivot0, Pivot1) < (a1, A_1 [right])
            GB_LT (less, Pivot0, Pivot1, a1, A_1 [right]) ;
        }
        while (less) ;

        // now A [0..left-1] < Pivot and A [right+1..n-1] > Pivot, but
        // A [left] > Pivot and A [right] < Pivot, so these two entries
        // are out of place and must be swapped.

        // However, if the two sides have met, the partition is finished.
        if (left >= right)
        {
            // A has been partitioned into A [0:right] and A [right+1:n-1].
            // k = right+1, so A is split into A [0:k-1] and A [k:n-1].
return (right + 1) ; } // since A [left] > pivot and A [right] < pivot, swap them GB_SWAP (A_0, left, right) ; int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ; // after the swap this condition holds: // A [0..left] < pivot and A [right..n-1] > pivot } } //------------------------------------------------------------------------------ // GB_SORT (quicksort): recursive single-threaded quicksort //------------------------------------------------------------------------------ static void GB_SORT (quicksort) // sort A [0:n-1] ( GB_TYPE *restrict A_0, // size n arrays to sort int64_t *restrict A_1, // size n array const int64_t n, // size of the array(s) to sort uint64_t *seed // random number seed #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { if (n < 20) { // in-place insertion sort on A [0:n-1], where n is small for (int64_t k = 1 ; k < n ; k++) { for (int64_t j = k ; j > 0 ; j--) { // a0 = A_0 [j] GB_GET (a0, A_0, j) ; // a1 = A_0 [j-1] GB_GET (a1, A_0, j-1) ; // break if A [j] >= A [j-1] bool less ; // less = (a0, A_1 [j]) < (a1, A_1 [j-1]) GB_LT (less, a0, A_1 [j], a1, A_1 [j-1]) ; if (!less) break ; // swap A [j-1] and A [j] GB_SWAP (A_0, j-1, j) ; int64_t t1 = A_1 [j-1] ; A_1 [j-1] = A_1 [j] ; A_1 [j] = t1 ; } } } else { // partition A [0:n-1] into A [0:k-1] and A [k:n-1] int64_t k = GB_SORT (partition) (A_0, A_1, n, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort each partition // sort A [0:k-1] GB_SORT (quicksort) (A_0, A_1, k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; // sort A [k:n-1] GB_SORT (quicksort) (GB_ADDR (A_0, k), A_1 + k, n-k, seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (binary_search): binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
static int64_t GB_SORT (binary_search) // return pleft ( const GB_TYPE *restrict Y_0, // Pivot is Y [pivot] const int64_t *restrict Y_1, const int64_t pivot, const GB_TYPE *restrict X_0, // search in X [p_start..p_end_-1] const int64_t *restrict X_1, const int64_t p_start, const int64_t p_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // find where the Pivot appears in X //-------------------------------------------------------------------------- // binary search of X [p_start...p_end-1] for the Pivot int64_t pleft = p_start ; int64_t pright = p_end - 1 ; GB_GET (Pivot0, Y_0, pivot) ; // Pivot0 = Y_0 [pivot] int64_t Pivot1 = Y_1 [pivot] ; bool less ; while (pleft < pright) { int64_t pmiddle = (pleft + pright) >> 1 ; // x0 = X_0 [pmiddle] GB_GET (x0, X_0, pmiddle) ; // less = (x0, X_1 [pmiddle]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pmiddle], Pivot0, Pivot1) ; pleft = less ? (pmiddle+1) : pleft ; pright = less ? pright : pmiddle ; } // binary search is narrowed down to a single item // or it has found the list is empty: ASSERT (pleft == pright || pleft == pright + 1) ; // If found is true then X [pleft == pright] == Pivot. If duplicates // appear then X [pleft] is any one of the entries equal to the Pivot // in the list. If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft+1 ... p_end-1] > Pivot holds. // The value X [pleft] may be either < or > Pivot. bool found = (pleft == pright) && (X_1 [pleft] == Pivot1) ; // Modify pleft and pright: if (!found && (pleft == pright)) { // x0 = X_0 [pleft] GB_GET (x0, X_0, pleft) ; // less = (x0, X_1 [pleft]) < (Pivot0, Pivot1) GB_LT (less, x0, X_1 [pleft], Pivot0, Pivot1) ; if (less) { pleft++ ; } else { // pright++ ; // (not needed) } } //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- // If found is false then // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] > Pivot holds, // and pleft-1 == pright // If X has no duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] < Pivot and // X [pleft ... p_end-1] >= Pivot holds. // If X has duplicates, then whether or not Pivot is found, // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. return (pleft) ; } //------------------------------------------------------------------------------ // GB_SORT (create_merge_tasks) //------------------------------------------------------------------------------ // Recursively constructs ntasks tasks to merge two arrays, Left and Right, // into Sresult, where Left is L [pL_start...pL_end-1], Right is R // [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1], // and where total_work is the total size of Left and Right. // // Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and // R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output // array S [S_task [tid] ... ]. The task tids created are t0 to // t0+ntasks-1. 
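//
// Worked example of the work-proportional split below (hypothetical sizes):
// with ntasks = 4 and total_work = nleft + nright = 12, if the pivot search
// puts work0 = 3 entries into the first partition, then
// ntasks0 = round (4 * 3/12) = 1 (already within the clamp 1 .. ntasks-1)
// and ntasks1 = 3: one task merges the small partition while three are
// recursively assigned to the large one.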
static void GB_SORT (create_merge_tasks) ( // output: int64_t *restrict L_task, // L_task [t0...t0+ntasks-1] computed int64_t *restrict L_len, // L_len [t0...t0+ntasks-1] computed int64_t *restrict R_task, // R_task [t0...t0+ntasks-1] computed int64_t *restrict R_len, // R_len [t0...t0+ntasks-1] computed int64_t *restrict S_task, // S_task [t0...t0+ntasks-1] computed // input: const int t0, // first task tid to create const int ntasks, // # of tasks to create const int64_t pS_start, // merge into S [pS_start...] const GB_TYPE *restrict L_0, // Left = L [pL_start...pL_end-1] const int64_t *restrict L_1, const int64_t pL_start, const int64_t pL_end, const GB_TYPE *restrict R_0, // Right = R [pR_start...pR_end-1] const int64_t *restrict R_1, const int64_t pR_start, const int64_t pR_end #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // get problem size //-------------------------------------------------------------------------- int64_t nleft = pL_end - pL_start ; // size of Left array int64_t nright = pR_end - pR_start ; // size of Right array int64_t total_work = nleft + nright ; // total work to do ASSERT (ntasks >= 1) ; ASSERT (total_work > 0) ; //-------------------------------------------------------------------------- // create the tasks //-------------------------------------------------------------------------- if (ntasks == 1) { //---------------------------------------------------------------------- // a single task will merge all of Left and Right into Sresult //---------------------------------------------------------------------- L_task [t0] = pL_start ; L_len [t0] = nleft ; R_task [t0] = pR_start ; R_len [t0] = nright ; S_task [t0] = pS_start ; } else { //---------------------------------------------------------------------- // partition the Left and Right arrays for multiple merge tasks //---------------------------------------------------------------------- int64_t pleft, pright ; if (nleft >= nright) { // split Left in half, and search for its pivot in Right pleft = (pL_end + pL_start) >> 1 ; pright = GB_SORT (binary_search) ( L_0, L_1, pleft, R_0, R_1, pR_start, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // split Right in half, and search for its pivot in Left pright = (pR_end + pR_start) >> 1 ; pleft = GB_SORT (binary_search) ( R_0, R_1, pright, L_0, L_1, pL_start, pL_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //---------------------------------------------------------------------- // partition the tasks according to the work of each partition //---------------------------------------------------------------------- // work0 is the total work in the first partition int64_t work0 = (pleft - pL_start) + (pright - pR_start) ; int ntasks0 = (int) round ((double) ntasks * (((double) work0) / ((double) total_work))) ; // ensure at least one task is assigned to each partition ntasks0 = GB_IMAX (ntasks0, 1) ; ntasks0 = GB_IMIN (ntasks0, ntasks-1) ; int ntasks1 = ntasks - ntasks0 ; //---------------------------------------------------------------------- // assign ntasks0 to the first half //---------------------------------------------------------------------- // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1] // into the result S [pS_start...work0-1]. 
GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start, L_0, L_1, pL_start, pleft, R_0, R_1, pR_start, pright #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; //---------------------------------------------------------------------- // assign ntasks1 to the second half //---------------------------------------------------------------------- // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1] // into the result S [pS_start+work0...pS_start+total_work]. int t1 = t0 + ntasks0 ; // first task id of the second set of tasks int64_t pS_start1 = pS_start + work0 ; // 2nd set starts here in S GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1, L_0, L_1, pleft, pL_end, R_0, R_1, pright, pR_end #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } } //------------------------------------------------------------------------------ // GB_SORT (merge): merge two sorted lists via a single thread //------------------------------------------------------------------------------ // merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1] */ static void GB_SORT (merge) ( GB_TYPE *restrict S_0, // output of length nleft + nright int64_t *restrict S_1, const GB_TYPE *restrict Left_0, // left input of length nleft const int64_t *restrict Left_1, const int64_t nleft, const GB_TYPE *restrict Right_0, // right input of length nright const int64_t *restrict Right_1, const int64_t nright #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { int64_t p, pleft, pright ; // merge the two inputs, Left and Right, while both inputs exist for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++) { // left0 = Left_0 [pleft] GB_GET (left0, Left_0, pleft) ; // right0 = Right_0 [pright] GB_GET (right0, Right_0, pright) ; bool less ; // less = (left0, Left_1 [pleft]) < (right0, Right_1 [pright]) GB_LT (less, left0, Left_1 [pleft], right0, Right_1 [pright]) ; if (less) { // S [p] = Left [pleft++] GB_COPY (S_0, p, Left_0, pleft) ; S_1 [p] = Left_1 [pleft] ; pleft++ ; } else { // S [p] = Right [pright++] GB_COPY (S_0, p, Right_0, pright) ; S_1 [p] = Right_1 [pright] ; pright++ ; } } // either input is exhausted; copy the remaining list into S if (pleft < nleft) { int64_t nremaining = (nleft - pleft) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Left_0, pleft), nremaining * GB_SIZE) ; memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ; } else if (pright < nright) { int64_t nremaining = (nright - pright) ; memcpy (GB_ADDR (S_0, p), GB_ADDR (Right_0, pright), nremaining * GB_SIZE) ; memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ; } } //------------------------------------------------------------------------------ // GB_SORT (vector) parallel mergesort of a single vector //------------------------------------------------------------------------------ static void GB_SORT (vector) // sort the pair of arrays A_0, A_1 ( GB_TYPE *restrict A_0, // size n array int64_t *restrict A_1, // size n array GB_TYPE *restrict W_0, // workspace of size n * GB_SIZE bytes int64_t *restrict W, // int64_t workspace of size n+6*ntasks+1 const int64_t n, const int kk, const int ntasks, const int nthreads // # of threads to use #if GB_SORT_UDT , size_t csize // size of GB_TYPE , size_t xsize // size of op->xtype , 
GxB_binary_function flt // function to test for < (ascend), > (descend) , GB_cast_function fcast // cast entry to inputs of flt #endif ) { //-------------------------------------------------------------------------- // split up workspace //-------------------------------------------------------------------------- ASSERT (nthreads > 2 && n >= GB_BASECASE) ; int64_t *T = W ; int64_t *restrict W_1 = T ; T += n ; int64_t *restrict L_task = T ; T += ntasks ; int64_t *restrict L_len = T ; T += ntasks ; int64_t *restrict R_task = T ; T += ntasks ; int64_t *restrict R_len = T ; T += ntasks ; int64_t *restrict S_task = T ; T += ntasks ; int64_t *restrict Slice = T ; T += (ntasks+1) ; //-------------------------------------------------------------------------- // partition and sort the leaves //-------------------------------------------------------------------------- GB_eslice (Slice, n, ntasks) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t leaf = Slice [tid] ; int64_t leafsize = Slice [tid+1] - leaf ; uint64_t seed = tid ; GB_SORT (quicksort) (GB_ADDR (A_0, leaf), A_1 + leaf, leafsize, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // merge each level //-------------------------------------------------------------------------- int nt = 1 ; for (int k = kk ; k >= 2 ; k -= 2) { //---------------------------------------------------------------------- // merge level k into level k-1, from A into W //---------------------------------------------------------------------- // TODO: skip k and k-1 for each group of 4 sublists of A if they are // already sorted with respect to each other. // this could be done in parallel if ntasks was large for (tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two A sublists into one W sublist GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], A_0, A_1, Slice [tid], Slice [tid+nt], A_0, A_1, Slice [tid+nt], Slice [tid+2*nt] #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (W_0, pS), W_1 + pS, GB_ADDR (A_0, pL), A_1 + pL, nL, GB_ADDR (A_0, pR), A_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; //---------------------------------------------------------------------- // merge level k-1 into level k-2, from W into A //---------------------------------------------------------------------- // this could be done in parallel if ntasks was large for (tid = 0 ; tid < ntasks ; tid += 2*nt) { // create 2*nt tasks to merge two W sublists into one A sublist GB_SORT (create_merge_tasks) ( L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid], W_0, W_1, Slice [tid], Slice [tid+nt], W_0, W_1, Slice [tid+nt], Slice [tid+2*nt] #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..] 
int64_t pL = L_task [tid], nL = L_len [tid] ; int64_t pR = R_task [tid], nR = R_len [tid] ; int64_t pS = S_task [tid] ; GB_SORT (merge) ( GB_ADDR (A_0, pS), A_1 + pS, GB_ADDR (W_0, pL), W_1 + pL, nL, GB_ADDR (W_0, pR), W_1 + pR, nR #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } nt = 2*nt ; } } //------------------------------------------------------------------------------ // sort all vectors in a matrix //------------------------------------------------------------------------------ #undef GB_FREE_WORKSPACE #define GB_FREE_WORKSPACE \ { \ GB_WERK_POP (Werk, int64_t) ; \ GB_FREE_WORK (&C_skipped, C_skipped_size) ; \ GB_FREE_WORK (&W_0, W_0_size) ; \ GB_FREE_WORK (&W, W_size) ; \ } static GrB_Info GB_SORT (matrix) ( GrB_Matrix C, // matrix sorted in-place #if GB_SORT_UDT GrB_BinaryOp op, // comparator for user-defined types only #endif GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT_MATRIX_OK (C, "C to sort", GB0) ; ASSERT (GB_JUMBLED_OK (C)) ; ASSERT (GB_IS_SPARSE (C) || GB_IS_HYPERSPARSE (C)) ; #if GB_SORT_UDT ASSERT_BINARYOP_OK (op, "op", GB0) ; ASSERT (op->ztype == GrB_BOOL) ; ASSERT (op->xtype == op->ytype) ; #endif int64_t cnz = GB_nnz (C) ; if (C->iso || cnz <= 1) { // nothing to do return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get input //-------------------------------------------------------------------------- int64_t cnvec = C->nvec ; int64_t *restrict Cp = C->p ; int64_t *restrict Ci = C->i ; GB_TYPE *restrict Cx = (GB_TYPE *) C->x ; // workspace GB_TYPE *restrict W_0 = NULL ; size_t W_0_size = 0 ; int64_t *restrict W = NULL ; size_t W_size = 0 ; int64_t *restrict C_skipped = NULL ; size_t C_skipped_size = 0 ; GB_WERK_DECLARE (Werk, int64_t) ; #if GB_SORT_UDT // get typesize, and function pointers for operators and typecasting GrB_Type ctype = C->type ; size_t csize = ctype->size ; size_t xsize = op->xtype->size ; GxB_binary_function flt = op->binop_function ; GB_cast_function fcast = GB_cast_factory (op->xtype->code, ctype->code) ; #endif //========================================================================== // phase1: sort all short vectors //========================================================================== // slice the C matrix into tasks for phase 1 GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (cnz, chunk, nthreads_max) ; int ntasks = (nthreads == 1) ? 
1 : (32 * nthreads) ; ntasks = GB_IMIN (ntasks, cnvec) ; ntasks = GB_IMAX (ntasks, 1) ; // printf ("phase1: threads %d tasks %d\n", nthreads, ntasks) ; GB_WERK_PUSH (Werk, 3*ntasks + 2, int64_t) ; if (Werk == NULL) { // out of memory return (GrB_OUT_OF_MEMORY) ; } int64_t *restrict C_max = Werk ; // size ntasks int64_t *restrict C_skip = Werk + ntasks ; // size ntasks+1 int64_t *restrict C_slice = Werk + 2*ntasks + 1; // size ntasks+1 GB_pslice (C_slice, Cp, cnvec, ntasks, false) ; // sort all short vectors in parallel, one thread per vector int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t task_max_length = 0 ; int64_t n_skipped = 0 ; for (int64_t k = kfirst ; k < klast ; k++) { // sort the vector C(:,k), unless it is too long const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz <= GB_BASECASE || nthreads == 1) { // printf ("\n------------sort: %ld cknz %ld\n", k, cknz) ; uint64_t seed = k ; GB_SORT (quicksort) (GB_ADDR (Cx, pC_start), Ci + pC_start, cknz, &seed #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } else { // printf ("\n------------skip: %ld cknz %ld\n", k, cknz) ; n_skipped++ ; } task_max_length = GB_IMAX (task_max_length, cknz) ; } C_max [tid] = task_max_length ; C_skip [tid] = n_skipped ; } // find max vector length and return if all vectors are now sorted int64_t max_length = 0 ; for (tid = 0 ; tid < ntasks ; tid++) { max_length = GB_IMAX (max_length, C_max [tid]) ; } if (max_length <= GB_BASECASE || nthreads == 1) { // all vectors are sorted GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; } //========================================================================== // phase2: sort all long vectors in parallel //========================================================================== //-------------------------------------------------------------------------- // construct a list of vectors that must still be sorted //-------------------------------------------------------------------------- GB_cumsum (C_skip, ntasks, NULL, 1, Context) ; int64_t total_skipped = C_skip [ntasks] ; C_skipped = GB_MALLOC_WORK (total_skipped, int64_t, &C_skipped_size) ; if (C_skipped == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { const int64_t kfirst = C_slice [tid] ; const int64_t klast = C_slice [tid+1] ; int64_t n_skipped = C_skip [tid] ; for (int64_t k = kfirst ; k < klast ; k++) { const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; if (cknz > GB_BASECASE) { // C(:,k) was not sorted C_skipped [n_skipped++] = k ; } } } //-------------------------------------------------------------------------- // determine # of tasks for each vector in phase 2 //-------------------------------------------------------------------------- // determine the number of levels to create, which must always be an // even number. The # of levels is chosen to ensure that the # of leaves // of the task tree is between 4*nthreads and 16*nthreads. // 2 to 4 threads: 4 levels, 16 quicksort leaves // 5 to 16 threads: 6 levels, 64 quicksort leaves // 17 to 64 threads: 8 levels, 256 quicksort leaves // 65 to 256 threads: 10 levels, 1024 quicksort leaves // 256 to 1024 threads: 12 levels, 4096 quicksort leaves // ... 
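    // Worked example: for nthreads = 8, log2 (8) = 3, ceil (3/2) = 2, so the
    // formula below gives kk = 2 + 2*2 = 6 levels and ntasks2 = 2^6 = 64
    // quicksort leaves, matching the "5 to 16 threads" row of the table above.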
int kk = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ; int ntasks2 = 1 << kk ; // printf ("phase2: threads %d tasks %d skipped %ld\n", nthreads, ntasks2, // total_skipped) ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- W = GB_MALLOC_WORK (max_length + 6*ntasks2 + 1, int64_t, &W_size) ; W_0 = (GB_TYPE *) GB_MALLOC_WORK (max_length * GB_SIZE, GB_void, &W_0_size) ; if (W == NULL || W_0 == NULL) { // out of memory GB_FREE_WORKSPACE ; return (GrB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // sort each long vector using all available threads //-------------------------------------------------------------------------- for (int64_t t = 0 ; t < total_skipped ; t++) { const int64_t k = C_skipped [t] ; const int64_t pC_start = Cp [k] ; const int64_t pC_end = Cp [k+1] ; const int64_t cknz = pC_end - pC_start ; ASSERT (cknz > GB_BASECASE) ; GB_SORT (vector) (GB_ADDR (Cx, pC_start), Ci + pC_start, W_0, W, cknz, kk, ntasks2, nthreads #if GB_SORT_UDT , csize, xsize, flt, fcast #endif ) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; C->jumbled = true ; ASSERT_MATRIX_OK (C, "C sorted by value", GB0) ; return (GrB_SUCCESS) ; } #undef GB_SORT #undef GB_TYPE
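/* A minimal, self-contained sketch of the template above, specialized by
   hand to the builtin double, ascending case (GB_TYPE = double, GB_LT is <,
   GB_GET/GB_SWAP become plain loads and swaps, and the UDT arguments drop
   out). The names partition_f64 and quicksort_f64 are illustrative, not
   part of SuiteSparse:GraphBLAS, and rand() stands in for GB_rand: */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

// Hoare partition of A_0/A_1 [0:n-1] about a randomly chosen pivot value:
// returns k such that A_0 [0:k-1] <= A_0 [k:n-1], mirroring the template
static int64_t partition_f64 (double *A_0, int64_t *A_1, int64_t n)
{
    double Pivot0 = A_0 [rand () % n] ;
    int64_t left = -1, right = n ;
    while (1)
    {
        do { left++  ; } while (A_0 [left]  < Pivot0) ;   // stop at >= Pivot
        do { right-- ; } while (Pivot0 < A_0 [right]) ;   // stop at <= Pivot
        if (left >= right) return (right + 1) ;
        double  t0 = A_0 [left] ; A_0 [left] = A_0 [right] ; A_0 [right] = t0 ;
        int64_t t1 = A_1 [left] ; A_1 [left] = A_1 [right] ; A_1 [right] = t1 ;
    }
}

static void quicksort_f64 (double *A_0, int64_t *A_1, int64_t n)
{
    if (n < 2) return ;     // (the template uses insertion sort for n < 20)
    int64_t k ;
    // an unlucky maximal pivot can yield k == n; retrying with a fresh
    // random pivot serves the same purpose as the template's advancing seed
    do { k = partition_f64 (A_0, A_1, n) ; } while (k == n) ;
    quicksort_f64 (A_0,     A_1,     k) ;       // sort A [0:k-1]
    quicksort_f64 (A_0 + k, A_1 + k, n - k) ;   // sort A [k:n-1]
}

int main (void)
{
    double  v [6] = { 3.0, 1.0, 4.0, 1.5, 9.0, 2.6 } ;
    int64_t i [6] = { 0, 1, 2, 3, 4, 5 } ;
    quicksort_f64 (v, i, 6) ;
    for (int k = 0 ; k < 6 ; k++)
    {
        printf ("%g (original index %lld)\n", v [k], (long long) i [k]) ;
    }
    return (0) ;
}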
residual.flux.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ // This routines calculates the residual (res=rhs-Ax) using the linear operator specified in the apply_op_ijk macro // This requires exchanging a ghost zone and/or enforcing a boundary condition. // NOTE, x_id must be distinct from rhs_id and res_id void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){ if(level->fluxes==NULL){posix_memalign( (void**)&(level->fluxes), 64, level->num_threads*(level->box_jStride)*(BLOCKCOPY_TILE_J+1)*(4)*sizeof(double) );} // exchange the boundary for x in prep for Ax... exchange_boundary(level,x_id,stencil_get_shape()); apply_BCs(level,x_id,stencil_get_shape()); // now do residual/restriction proper... double _timeStart = getTime(); double h2inv = 1.0/(level->h*level->h); // loop over all block/tiles this process owns... #pragma omp parallel if(level->num_my_blocks>1) { int block; int threadID=0;if(level->num_my_blocks>1)threadID = omp_get_thread_num(); double * __restrict__ flux_i = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 0); double * __restrict__ flux_j = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 1); double * __restrict__ flux_k = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 2); for(block=threadID;block<level->num_my_blocks;block+=level->num_threads){ const int box = level->my_blocks[block].read.box; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int idim = level->my_blocks[block].dim.i; const int jdim = level->my_blocks[block].dim.j; const int kdim = level->my_blocks[block].dim.k; const int ghosts = level->my_boxes[box].ghosts; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int flux_kStride = (BLOCKCOPY_TILE_J+1)*level->box_jStride; const double * __restrict__ x = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); // i.e. 
[0] = first non ghost zone point const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); double * __restrict__ res = level->my_boxes[box].vectors[ res_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); #ifdef __INTEL_COMPILER __assume_aligned(x ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(res ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume( (+jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned __assume( (-jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (+kStride) % BOX_ALIGN_KSTRIDE == 0); __assume( (-kStride) % BOX_ALIGN_KSTRIDE == 0); __assume(((jdim )*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume(((jdim+1)*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (flux_kStride) % BOX_ALIGN_JSTRIDE == 0); #elif __xlC__ __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), res ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k); #endif int i,j,k,ij; for(k=0;k<kdim;k++){ double * __restrict__ flux_klo = flux_k + ((k )&0x1)*flux_kStride; double * __restrict__ flux_khi = flux_k + ((k+1)&0x1)*flux_kStride; #if (BLOCKCOPY_TILE_I != 10000) #error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000). #endif // calculate fluxes (pipeline flux_k)... #if (_OPENMP>=201307) #pragma omp simd aligned(beta_i,x,flux_i:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ // flux_i for jdim pencils... int ijk = ij + (k )*kStride; flux_i[ ij] = beta_dxdi(x,ijk ); } #if (_OPENMP>=201307) #pragma omp simd aligned(beta_j,x,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<(jdim+1)*jStride;ij++){ // flux_j for jdim+1 pencils... int ijk = ij + (k )*kStride; flux_j[ ij] = beta_dxdj(x,ijk ); } if(k==0){ // startup / prolog for flux_k on jdim pencils... 
#if (_OPENMP>=201307) #pragma omp simd aligned(beta_k,x,flux_klo:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ int ijk = ij + 0; flux_klo[ij] = beta_dxdk(x,ijk); }} #if (_OPENMP>=201307) #pragma omp simd aligned(beta_k,x,flux_khi:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ // for flux_k on jdim pencils... int ijk = ij + (k+1)*kStride; flux_khi[ij] = beta_dxdk(x,ijk); // flux_k needs k+1 } // residual... #if (_OPENMP>=201307) #pragma omp simd aligned(flux_i,flux_j,flux_klo,flux_khi,alpha,rhs,x,res:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<(jdim-1)*jStride+idim;ij++){ int ijk = ij + k*kStride; double Lx = - flux_i[ ij] + flux_i[ ij+ 1] - flux_j[ ij] + flux_j[ ij+jStride] - flux_klo[ij] + flux_khi[ij ]; #ifdef USE_HELMHOLTZ double Ax = a*alpha[ijk]*x[ijk] - b*Lx; #else double Ax = -b*Lx; #endif res[ijk] = rhs[ijk]-Ax; } } // kdim } // block } // omp level->timers.residual += (double)(getTime()-_timeStart); }
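/* A minimal sketch (1-D stand-in, hypothetical data) of the flux_k
   ping-pong used above: flux_klo/flux_khi alternate between the two plane
   buffers selected by ((k)&0x1) and ((k+1)&0x1), so the k-direction flux
   computed for face k+1 in one iteration is reused as the low face in the
   next, and each interior face flux is computed exactly once. The names
   fbuf and face_flux below are illustrative, not from the code above: */

#include <stdio.h>

#define KDIM 5
static const double x [KDIM + 2] = { 0, 1, 4, 9, 16, 25, 36 } ;

static double face_flux (int k) { return (x [k + 1] - x [k]) ; }

int main (void)
{
    double fbuf [2] ;                     // two flux planes, reused in turn
    fbuf [0] = face_flux (0) ;            // prolog, as in the k==0 branch
    for (int k = 0 ; k < KDIM ; k++)
    {
        double *flo = &fbuf [ k      & 0x1] ;  // filled in the last iteration
        double *fhi = &fbuf [(k + 1) & 0x1] ;
        *fhi = face_flux (k + 1) ;             // only the high face is new work
        printf ("cell %d: Lx = %g\n", k, *fhi - *flo) ;
    }
    return (0) ;
}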
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ // This routines calculates the residual (res=rhs-Ax) using the linear operator specified in the apply_op_ijk macro // This requires exchanging a ghost zone and/or enforcing a boundary condition. // NOTE, x_id must be distinct from rhs_id and res_id void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){ if(level->fluxes==NULL){posix_memalign( (void**)&(level->fluxes), 64, level->num_threads*(level->box_jStride)*(BLOCKCOPY_TILE_J+1)*(4)*sizeof(double) );} // exchange the boundary for x in prep for Ax... exchange_boundary(level,x_id,stencil_get_shape()); apply_BCs(level,x_id,stencil_get_shape()); // now do residual/restriction proper... double _timeStart = getTime(); double h2inv = 1.0/(level->h*level->h); // loop over all block/tiles this process owns... int block; int threadID=0;if(level->num_my_blocks>1)threadID = omp_get_thread_num(); double * __restrict__ flux_i = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 0); double * __restrict__ flux_j = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 1); double * __restrict__ flux_k = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 2); for(block=threadID;block<level->num_my_blocks;block+=level->num_threads){ const int box = level->my_blocks[block].read.box; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int idim = level->my_blocks[block].dim.i; const int jdim = level->my_blocks[block].dim.j; const int kdim = level->my_blocks[block].dim.k; const int ghosts = level->my_boxes[box].ghosts; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int flux_kStride = (BLOCKCOPY_TILE_J+1)*level->box_jStride; const double * __restrict__ x = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); // i.e. 
[0] = first non ghost zone point const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); double * __restrict__ res = level->my_boxes[box].vectors[ res_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); #ifdef __INTEL_COMPILER __assume_aligned(x ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(res ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume( (+jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned __assume( (-jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (+kStride) % BOX_ALIGN_KSTRIDE == 0); __assume( (-kStride) % BOX_ALIGN_KSTRIDE == 0); __assume(((jdim )*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume(((jdim+1)*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (flux_kStride) % BOX_ALIGN_JSTRIDE == 0); #elif __xlC__ __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), res ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k); #endif int i,j,k,ij; for(k=0;k<kdim;k++){ double * __restrict__ flux_klo = flux_k + ((k )&0x1)*flux_kStride; double * __restrict__ flux_khi = flux_k + ((k+1)&0x1)*flux_kStride; #if (BLOCKCOPY_TILE_I != 10000) #error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000). #endif // calculate fluxes (pipeline flux_k)... #if (_OPENMP>=201307) #endif for(ij=0;ij<jdim*jStride;ij++){ // flux_i for jdim pencils... int ijk = ij + (k )*kStride; flux_i[ ij] = beta_dxdi(x,ijk ); } #if (_OPENMP>=201307) #endif for(ij=0;ij<(jdim+1)*jStride;ij++){ // flux_j for jdim+1 pencils... int ijk = ij + (k )*kStride; flux_j[ ij] = beta_dxdj(x,ijk ); } if(k==0){ // startup / prolog for flux_k on jdim pencils... #if (_OPENMP>=201307) #endif for(ij=0;ij<jdim*jStride;ij++){ int ijk = ij + 0; flux_klo[ij] = beta_dxdk(x,ijk); }} #if (_OPENMP>=201307) #endif for(ij=0;ij<jdim*jStride;ij++){ // for flux_k on jdim pencils... int ijk = ij + (k+1)*kStride; flux_khi[ij] = beta_dxdk(x,ijk); // flux_k needs k+1 } // residual... 
#if (_OPENMP>=201307) #endif for(ij=0;ij<(jdim-1)*jStride+idim;ij++){ int ijk = ij + k*kStride; double Lx = - flux_i[ ij] + flux_i[ ij+ 1] - flux_j[ ij] + flux_j[ ij+jStride] - flux_klo[ij] + flux_khi[ij ]; #ifdef USE_HELMHOLTZ double Ax = a*alpha[ijk]*x[ijk] - b*Lx; #else double Ax = -b*Lx; #endif res[ijk] = rhs[ijk]-Ax; } } // kdim } // block // omp level->timers.residual += (double)(getTime()-_timeStart); }
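/* The pointer setup above offsets each vector by ghosts*(1+jStride+kStride)
   plus jlo*jStride + klo*kStride, so that x[0] is the first non-ghost point
   of the block being processed. A small arithmetic sketch with hypothetical
   dimensions (the real code additionally requires jStride and kStride to be
   multiples of BOX_ALIGN_JSTRIDE/BOX_ALIGN_KSTRIDE, which this ignores): */

#include <stdio.h>

int main (void)
{
    const int ghosts = 1, dim = 8 ;           // hypothetical 8^3 box
    const int jStride = dim + 2 * ghosts ;    // 10 doubles per pencil
    const int kStride = jStride * jStride ;   // 100 doubles per plane
    // linear offset of the first interior point, i.e. cell (0,0,0):
    const int origin = ghosts * (1 + jStride + kStride) ;
    printf ("origin     = %d\n", origin) ;    // 1 + 10 + 100 = 111
    // a block whose low corner is at (jlo,klo) = (4,4) shifts further:
    const int jlo = 4, klo = 4 ;
    printf ("block base = %d\n", origin + jlo * jStride + klo * kStride) ;
    return (0) ;
}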
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ // This routines calculates the residual (res=rhs-Ax) using the linear operator specified in the apply_op_ijk macro // This requires exchanging a ghost zone and/or enforcing a boundary condition. // NOTE, x_id must be distinct from rhs_id and res_id void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){ if(level->fluxes==NULL){posix_memalign( (void**)&(level->fluxes), 64, level->num_threads*(level->box_jStride)*(BLOCKCOPY_TILE_J+1)*(4)*sizeof(double) );} // exchange the boundary for x in prep for Ax... exchange_boundary(level,x_id,stencil_get_shape()); apply_BCs(level,x_id,stencil_get_shape()); // now do residual/restriction proper... double _timeStart = getTime(); double h2inv = 1.0/(level->h*level->h); // loop over all block/tiles this process owns... #pragma omp parallel if(level->num_my_blocks>1) { int block; int threadID=0;if(level->num_my_blocks>1)threadID = omp_get_thread_num(); double * __restrict__ flux_i = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 0); double * __restrict__ flux_j = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 1); double * __restrict__ flux_k = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 2); for(block=threadID;block<level->num_my_blocks;block+=level->num_threads){ const int box = level->my_blocks[block].read.box; const int jlo = level->my_blocks[block].read.j; const int klo = level->my_blocks[block].read.k; const int idim = level->my_blocks[block].dim.i; const int jdim = level->my_blocks[block].dim.j; const int kdim = level->my_blocks[block].dim.k; const int ghosts = level->my_boxes[box].ghosts; const int jStride = level->my_boxes[box].jStride; const int kStride = level->my_boxes[box].kStride; const int flux_kStride = (BLOCKCOPY_TILE_J+1)*level->box_jStride; const double * __restrict__ x = level->my_boxes[box].vectors[ x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); // i.e. 
[0] = first non ghost zone point const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); double * __restrict__ res = level->my_boxes[box].vectors[ res_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); #ifdef __INTEL_COMPILER __assume_aligned(x ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(rhs ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(beta_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(res ,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_i,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_j,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume_aligned(flux_k,BOX_ALIGN_JSTRIDE*sizeof(double)); __assume( (+jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned __assume( (-jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (+kStride) % BOX_ALIGN_KSTRIDE == 0); __assume( (-kStride) % BOX_ALIGN_KSTRIDE == 0); __assume(((jdim )*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume(((jdim+1)*jStride) % BOX_ALIGN_JSTRIDE == 0); __assume( (flux_kStride) % BOX_ALIGN_JSTRIDE == 0); #elif __xlC__ __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), res ); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j); __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k); #endif int i,j,k,ij; for(k=0;k<kdim;k++){ double * __restrict__ flux_klo = flux_k + ((k )&0x1)*flux_kStride; double * __restrict__ flux_khi = flux_k + ((k+1)&0x1)*flux_kStride; #if (BLOCKCOPY_TILE_I != 10000) #error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000). #endif // calculate fluxes (pipeline flux_k)... #if (_OPENMP>=201307) #pragma omp simd aligned(beta_i,x,flux_i:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ // flux_i for jdim pencils... int ijk = ij + (k )*kStride; flux_i[ ij] = beta_dxdi(x,ijk ); } #if (_OPENMP>=201307) #pragma omp simd aligned(beta_j,x,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<(jdim+1)*jStride;ij++){ // flux_j for jdim+1 pencils... int ijk = ij + (k )*kStride; flux_j[ ij] = beta_dxdj(x,ijk ); } if(k==0){ // startup / prolog for flux_k on jdim pencils... 
#if (_OPENMP>=201307) #pragma omp simd aligned(beta_k,x,flux_klo:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ int ijk = ij + 0; flux_klo[ij] = beta_dxdk(x,ijk); }} #if (_OPENMP>=201307) #pragma omp simd aligned(beta_k,x,flux_khi:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<jdim*jStride;ij++){ // for flux_k on jdim pencils... int ijk = ij + (k+1)*kStride; flux_khi[ij] = beta_dxdk(x,ijk); // flux_k needs k+1 } // residual... #if (_OPENMP>=201307) #pragma omp simd aligned(flux_i,flux_j,flux_klo,flux_khi,alpha,rhs,x,res:BOX_ALIGN_JSTRIDE*sizeof(double)) #endif for(ij=0;ij<(jdim-1)*jStride+idim;ij++){ int ijk = ij + k*kStride; double Lx = - flux_i[ ij] + flux_i[ ij+ 1] - flux_j[ ij] + flux_j[ ij+jStride] - flux_klo[ij] + flux_khi[ij ]; #ifdef USE_HELMHOLTZ double Ax = a*alpha[ijk]*x[ijk] - b*Lx; #else double Ax = -b*Lx; #endif res[ijk] = rhs[ijk]-Ax; } } // kdim } // block } // omp level->timers.residual += (double)(getTime()-_timeStart); }
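/* The (_OPENMP>=201307) guards above test for OpenMP 4.0 (dated 2013-07),
   the first revision with "#pragma omp simd"; older compilers simply skip
   the pragma and the loops remain scalar. A standalone sketch of the
   aligned clause on hypothetical 64-byte-aligned buffers: */

#include <stdio.h>
#include <stdlib.h>

int main (void)
{
    const int n = 1024 ;
    double *a = (double *) aligned_alloc (64, n * sizeof (double)) ;
    double *b = (double *) aligned_alloc (64, n * sizeof (double)) ;
    if (a == NULL || b == NULL) return (1) ;
    for (int i = 0 ; i < n ; i++) { b [i] = i ; }
    // promise 64-byte alignment so the compiler may emit aligned vector loads
    #if (_OPENMP >= 201307)
    #pragma omp simd aligned(a, b : 64)
    #endif
    for (int i = 0 ; i < n ; i++) { a [i] = 2.0 * b [i] ; }
    printf ("a[n-1] = %g\n", a [n - 1]) ;      // prints 2046
    free (a) ; free (b) ;
    return (0) ;
}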
boxloop_cuda.h
/****************************************************************************** * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Header info for the BoxLoop * *****************************************************************************/ /*-------------------------------------------------------------------------- * BoxLoop macros: *--------------------------------------------------------------------------*/ #ifndef HYPRE_BOXLOOP_CUDA_HEADER #define HYPRE_BOXLOOP_CUDA_HEADER #if (defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)) && !defined(HYPRE_USING_RAJA) && !defined(HYPRE_USING_KOKKOS) #define HYPRE_LAMBDA [=] __host__ __device__ /* TODO: RL: support 4-D */ typedef struct hypre_Boxloop_struct { HYPRE_Int lsize0, lsize1, lsize2; HYPRE_Int strides0, strides1, strides2; HYPRE_Int bstart0, bstart1, bstart2; HYPRE_Int bsize0, bsize1, bsize2; } hypre_Boxloop; #ifdef __cplusplus extern "C++" { #endif /* ------------------------- * parfor-loop * ------------------------*/ template <typename LOOP_BODY> __global__ void forall_kernel( LOOP_BODY loop_body, HYPRE_Int length ) { const HYPRE_Int idx = hypre_cuda_get_grid_thread_id<1, 1>(); /* const HYPRE_Int number_threads = hypre_cuda_get_grid_num_threads<1,1>(); */ if (idx < length) { loop_body(idx); } } template<typename LOOP_BODY> void BoxLoopforall( HYPRE_Int length, LOOP_BODY loop_body ) { HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); const dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); HYPRE_CUDA_LAUNCH( forall_kernel, gDim, bDim, loop_body, length ); } } /* ------------------------------ * parforreduction-loop * -----------------------------*/ template <typename LOOP_BODY, typename REDUCER> __global__ void reductionforall_kernel( HYPRE_Int length, REDUCER reducer, LOOP_BODY loop_body ) { const HYPRE_Int thread_id = hypre_cuda_get_grid_thread_id<1, 1>(); const HYPRE_Int n_threads = hypre_cuda_get_grid_num_threads<1, 1>(); for (HYPRE_Int idx = thread_id; idx < length; idx += n_threads) { loop_body(idx, reducer); } /* reduction in block-level and the save the results in reducer */ reducer.BlockReduce(); } template<typename LOOP_BODY, typename REDUCER> void ReductionBoxLoopforall( HYPRE_Int length, REDUCER & reducer, LOOP_BODY loop_body ) { if (length <= 0) { return; } HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx, reducer); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); /* Note: we assume gDim cannot exceed 1024 * and bDim < WARP * WARP */ gDim.x = hypre_min(gDim.x, 1024); reducer.nblocks = gDim.x; /* hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", length, bDim.x, gDim.x); */ HYPRE_CUDA_LAUNCH( reductionforall_kernel, 
gDim, bDim, length, reducer, loop_body ); } } #ifdef __cplusplus } #endif /* Get 1-D length of the loop, in hypre__tot */ #define hypre_newBoxLoopInit(ndim, loop_size) \ HYPRE_Int hypre__tot = 1; \ for (HYPRE_Int hypre_d = 0; hypre_d < ndim; hypre_d ++) \ { \ hypre__tot *= loop_size[hypre_d]; \ } /* Initialize struct for box-k */ #define hypre_BoxLoopDataDeclareK(k, ndim, loop_size, dbox, start, stride) \ hypre_Boxloop databox##k; \ /* dim 0 */ \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = start[0] - dbox->imin[0]; \ databox##k.bsize0 = dbox->imax[0] - dbox->imin[0]; \ /* dim 1 */ \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = start[1] - dbox->imin[1]; \ databox##k.bsize1 = dbox->imax[1] - dbox->imin[1]; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ /* dim 2 */ \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = start[2] - dbox->imin[2]; \ databox##k.bsize2 = dbox->imax[2] - dbox->imin[2]; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } #define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \ hypre_Boxloop databox##k; \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = 0; \ databox##k.bsize0 = 0; \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } /* RL: TODO loop_size out of box struct, bsize +1 */ /* Given input 1-D 'idx' in box, get 3-D 'local_idx' in loop_size */ #define hypre_newBoxLoopDeclare(box) \ hypre_Index local_idx; \ HYPRE_Int idx_local = idx; \ hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \ idx_local = idx_local / box.lsize0; \ hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \ idx_local = idx_local / box.lsize1; \ hypre_IndexD(local_idx, 2) = idx_local % box.lsize2; \ /* Given input 3-D 'local_idx', get 1-D 'hypre__i' in 'box' */ #define hypre_BoxLoopIncK(k, box, hypre__i) \ HYPRE_Int hypre_boxD##k = 1; \ HYPRE_Int hypre__i = 0; \ hypre__i += (hypre_IndexD(local_idx, 0) * box.strides0 + box.bstart0) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \ hypre__i += (hypre_IndexD(local_idx, 1) * box.strides1 + box.bstart1) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \ hypre__i += (hypre_IndexD(local_idx, 2) * box.strides2 + box.bstart2) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize2 + 1); /* get 3-D local_idx into 'index' */ #define hypre_BoxLoopGetIndex(index) \ index[0] = hypre_IndexD(local_idx, 0); \ index[1] = hypre_IndexD(local_idx, 1); \ index[2] = hypre_IndexD(local_idx, 2); /* BoxLoop 0 */ #define hypre_newBoxLoop0Begin(ndim, loop_size) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_newBoxLoop0End() \ }); \ } /* BoxLoop 1 */ #define hypre_newBoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, 
i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_newBoxLoop1End(i1) \ }); \ } /* BoxLoop 2 */ #define hypre_newBoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_newBoxLoop2End(i1, i2) \ }); \ } /* BoxLoop 3 */ #define hypre_newBoxLoop3Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim,loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim,loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim,loop_size, dbox3, start3, stride3); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); #define hypre_newBoxLoop3End(i1, i2, i3) \ }); \ } /* BoxLoop 4 */ #define hypre_newBoxLoop4Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3, \ dbox4, start4, stride4, i4) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim, loop_size, dbox3, start3, stride3); \ hypre_BoxLoopDataDeclareK(4, ndim, loop_size, dbox4, start4, stride4); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); \ hypre_BoxLoopIncK(4, databox4, i4); #define hypre_newBoxLoop4End(i1, i2, i3, i4) \ }); \ } /* Basic BoxLoops have no boxes */ /* BoxLoop 1 */ #define zypre_newBasicBoxLoop1Begin(ndim, loop_size, stride1, i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); /* BoxLoop 2 */ #define zypre_newBasicBoxLoop2Begin(ndim, loop_size, stride1, i1, stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ zypre_BasicBoxLoopDataDeclareK(2, ndim, loop_size, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ /* TODO: RL just parallel-for, it should not be here, better in utilities */ #define hypre_LoopBegin(size, idx) \ { \ BoxLoopforall(size, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_LoopEnd() \ }); \ } /* Reduction BoxLoop1 */ #define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, 
dbox1, start1, stride1); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_BoxLoop1ReductionEnd(i1, reducesum) \ }); \ } /* Reduction BoxLoop2 */ #define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \ }); \ } /* Renamings */ #define hypre_BoxLoopBlock() 0 #define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin #define hypre_BoxLoop0For hypre_newBoxLoop0For #define hypre_BoxLoop0End hypre_newBoxLoop0End #define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin #define hypre_BoxLoop1For hypre_newBoxLoop1For #define hypre_BoxLoop1End hypre_newBoxLoop1End #define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin #define hypre_BoxLoop2For hypre_newBoxLoop2For #define hypre_BoxLoop2End hypre_newBoxLoop2End #define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin #define hypre_BoxLoop3For hypre_newBoxLoop3For #define hypre_BoxLoop3End hypre_newBoxLoop3End #define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin #define hypre_BoxLoop4For hypre_newBoxLoop4For #define hypre_BoxLoop4End hypre_newBoxLoop4End #define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin #define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin #endif #endif /* #ifndef HYPRE_BOXLOOP_CUDA_HEADER */
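/* A standalone sketch of the index arithmetic performed by
   hypre_newBoxLoopDeclare and hypre_BoxLoopIncK above: a flat loop index
   idx is unflattened into a local (i,j,k) using the loop sizes, then
   re-flattened into the data box using its own strides, start offsets, and
   extents (bsize + 1). All sizes below are hypothetical: */

#include <stdio.h>

int main (void)
{
    const int lsize  [3] = { 4, 3, 2 } ;   // 4 x 3 x 2 loop
    const int bdim   [3] = { 6, 5, 4 } ;   // data box extents (bsize + 1)
    const int bstart [3] = { 1, 1, 1 } ;   // loop start relative to the box
    const int stride [3] = { 1, 1, 1 } ;
    const int tot = lsize [0] * lsize [1] * lsize [2] ;  // hypre__tot
    for (int idx = 0 ; idx < tot ; idx++)
    {
        int t = idx ;                      // hypre_newBoxLoopDeclare
        int i = t % lsize [0] ; t /= lsize [0] ;
        int j = t % lsize [1] ; t /= lsize [1] ;
        int k = t % lsize [2] ;
        int boxD = 1, i1 = 0 ;             // hypre_BoxLoopIncK
        i1 += (i * stride [0] + bstart [0]) * boxD ; boxD *= bdim [0] ;
        i1 += (j * stride [1] + bstart [1]) * boxD ; boxD *= bdim [1] ;
        i1 += (k * stride [2] + bstart [2]) * boxD ;
        if (idx < 2 || idx == tot - 1)
        {
            printf ("idx %2d -> (%d,%d,%d) -> i1 = %d\n", idx, i, j, k, i1) ;
        }
    }
    return (0) ;
}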
/****************************************************************************** * * Header info for the BoxLoop * *****************************************************************************/ /*-------------------------------------------------------------------------- * BoxLoop macros: *--------------------------------------------------------------------------*/ #ifndef HYPRE_BOXLOOP_CUDA_HEADER #define HYPRE_BOXLOOP_CUDA_HEADER #if (defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)) && !defined(HYPRE_USING_RAJA) && !defined(HYPRE_USING_KOKKOS) #define HYPRE_LAMBDA [=] __host__ __device__ /* TODO: RL: support 4-D */ typedef struct hypre_Boxloop_struct { HYPRE_Int lsize0, lsize1, lsize2; HYPRE_Int strides0, strides1, strides2; HYPRE_Int bstart0, bstart1, bstart2; HYPRE_Int bsize0, bsize1, bsize2; } hypre_Boxloop; #ifdef __cplusplus extern "C++" { #endif /* * ------------------------- parfor-loop ------------------------ */ template < typename LOOP_BODY > __global__ void forall_kernel(LOOP_BODY loop_body, HYPRE_Int length) { const HYPRE_Int idx = hypre_cuda_get_grid_thread_id < 1, 1 > (); /* * const HYPRE_Int number_threads = * hypre_cuda_get_grid_num_threads<1,1>(); */ if (idx < length) { loop_body(idx); } } template < typename LOOP_BODY > void BoxLoopforall(HYPRE_Int length, LOOP_BODY loop_body) { HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); const dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); HYPRE_CUDA_LAUNCH(forall_kernel, gDim, bDim, loop_body, length); } } /* * ------------------------------ parforreduction-loop * ----------------------------- */ template < typename LOOP_BODY, typename REDUCER > __global__ void reductionforall_kernel(HYPRE_Int length, REDUCER reducer, LOOP_BODY loop_body) { const HYPRE_Int thread_id = hypre_cuda_get_grid_thread_id < 1, 1 > (); const HYPRE_Int n_threads = hypre_cuda_get_grid_num_threads < 1, 1 > (); for (HYPRE_Int idx = thread_id; idx < length; idx += n_threads) { loop_body(idx, reducer); } /* reduction in block-level and the save the results in reducer */ reducer.BlockReduce(); } template < typename LOOP_BODY, typename REDUCER > void ReductionBoxLoopforall(HYPRE_Int length, REDUCER & reducer, LOOP_BODY loop_body) { if (length <= 0) { return; } HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx, reducer); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); /* * Note: we assume gDim cannot exceed 1024 and bDim < WARP * WARP */ gDim.x = hypre_min(gDim.x, 1024); reducer.nblocks = gDim.x; /* * hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", * length, bDim.x, gDim.x); */ HYPRE_CUDA_LAUNCH(reductionforall_kernel, gDim, bDim, length, reducer, loop_body); } } #ifdef __cplusplus } #endif /* Get 1-D length of the loop, in hypre__tot */ #define hypre_newBoxLoopInit(ndim, loop_size) \ HYPRE_Int hypre__tot = 1; \ for (HYPRE_Int hypre_d = 0; hypre_d < ndim; hypre_d ++) \ { \ hypre__tot *= loop_size[hypre_d]; \ } /* Initialize struct for box-k */ #define hypre_BoxLoopDataDeclareK(k, ndim, loop_size, dbox, start, stride) \ 
hypre_Boxloop databox##k; \ /* dim 0 */ \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = start[0] - dbox->imin[0]; \ databox##k.bsize0 = dbox->imax[0] - dbox->imin[0]; \ /* dim 1 */ \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = start[1] - dbox->imin[1]; \ databox##k.bsize1 = dbox->imax[1] - dbox->imin[1]; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ /* dim 2 */ \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = start[2] - dbox->imin[2]; \ databox##k.bsize2 = dbox->imax[2] - dbox->imin[2]; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } #define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \ hypre_Boxloop databox##k; \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = 0; \ databox##k.bsize0 = 0; \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } /* RL: TODO loop_size out of box struct, bsize +1 */ /* Given input 1-D 'idx' in box, get 3-D 'local_idx' in loop_size */ #define hypre_newBoxLoopDeclare(box) \ hypre_Index local_idx; \ HYPRE_Int idx_local = idx; \ hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \ idx_local = idx_local / box.lsize0; \ hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \ idx_local = idx_local / box.lsize1; \ hypre_IndexD(local_idx, 2) = idx_local % box.lsize2; \ /* Given input 3-D 'local_idx', get 1-D 'hypre__i' in 'box' */ #define hypre_BoxLoopIncK(k, box, hypre__i) \ HYPRE_Int hypre_boxD##k = 1; \ HYPRE_Int hypre__i = 0; \ hypre__i += (hypre_IndexD(local_idx, 0) * box.strides0 + box.bstart0) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \ hypre__i += (hypre_IndexD(local_idx, 1) * box.strides1 + box.bstart1) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \ hypre__i += (hypre_IndexD(local_idx, 2) * box.strides2 + box.bstart2) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize2 + 1); /* get 3-D local_idx into 'index' */ #define hypre_BoxLoopGetIndex(index) \ index[0] = hypre_IndexD(local_idx, 0); \ index[1] = hypre_IndexD(local_idx, 1); \ index[2] = hypre_IndexD(local_idx, 2); /* BoxLoop 0 */ #define hypre_newBoxLoop0Begin(ndim, loop_size) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_newBoxLoop0End() \ }); \ } /* BoxLoop 1 */ #define hypre_newBoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_newBoxLoop1End(i1) \ }); \ } /* BoxLoop 2 */ #define hypre_newBoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, 
stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_newBoxLoop2End(i1, i2) \ }); \ } /* BoxLoop 3 */ #define hypre_newBoxLoop3Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim,loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim,loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim,loop_size, dbox3, start3, stride3); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); #define hypre_newBoxLoop3End(i1, i2, i3) \ }); \ } /* BoxLoop 4 */ #define hypre_newBoxLoop4Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3, \ dbox4, start4, stride4, i4) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim, loop_size, dbox3, start3, stride3); \ hypre_BoxLoopDataDeclareK(4, ndim, loop_size, dbox4, start4, stride4); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); \ hypre_BoxLoopIncK(4, databox4, i4); #define hypre_newBoxLoop4End(i1, i2, i3, i4) \ }); \ } /* Basic BoxLoops have no boxes */ /* BoxLoop 1 */ #define zypre_newBasicBoxLoop1Begin(ndim, loop_size, stride1, i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); /* BoxLoop 2 */ #define zypre_newBasicBoxLoop2Begin(ndim, loop_size, stride1, i1, stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ zypre_BasicBoxLoopDataDeclareK(2, ndim, loop_size, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ /* TODO: RL just parallel-for, it should not be here, better in utilities */ #define hypre_LoopBegin(size, idx) \ { \ BoxLoopforall(size, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_LoopEnd() \ }); \ } /* Reduction BoxLoop1 */ #define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_BoxLoop1ReductionEnd(i1, reducesum) \ }); \ } /* Reduction BoxLoop2 */ #define hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, 
reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \ }); \ } /* Renamings */ #define hypre_BoxLoopBlock() 0 #define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin #define hypre_BoxLoop0For hypre_newBoxLoop0For #define hypre_BoxLoop0End hypre_newBoxLoop0End #define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin #define hypre_BoxLoop1For hypre_newBoxLoop1For #define hypre_BoxLoop1End hypre_newBoxLoop1End #define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin #define hypre_BoxLoop2For hypre_newBoxLoop2For #define hypre_BoxLoop2End hypre_newBoxLoop2End #define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin #define hypre_BoxLoop3For hypre_newBoxLoop3For #define hypre_BoxLoop3End hypre_newBoxLoop3End #define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin #define hypre_BoxLoop4For hypre_newBoxLoop4For #define hypre_BoxLoop4End hypre_newBoxLoop4End #define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin #define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin #endif #endif /* #ifndef HYPRE_BOXLOOP_CUDA_HEADER */
/****************************************************************************** * * Header info for the BoxLoop * *****************************************************************************/ /*-------------------------------------------------------------------------- * BoxLoop macros: *--------------------------------------------------------------------------*/ #ifndef HYPRE_BOXLOOP_CUDA_HEADER #define HYPRE_BOXLOOP_CUDA_HEADER #if (defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)) && !defined(HYPRE_USING_RAJA) && !defined(HYPRE_USING_KOKKOS) #define HYPRE_LAMBDA [=] __host__ __device__ /* TODO: RL: support 4-D */ typedef struct hypre_Boxloop_struct { HYPRE_Int lsize0, lsize1, lsize2; HYPRE_Int strides0, strides1, strides2; HYPRE_Int bstart0, bstart1, bstart2; HYPRE_Int bsize0, bsize1, bsize2; } hypre_Boxloop; #ifdef __cplusplus extern "C++" { #endif /* * ------------------------- parfor-loop ------------------------ */ template < typename LOOP_BODY > __global__ void forall_kernel(LOOP_BODY loop_body, HYPRE_Int length) { const HYPRE_Int idx = hypre_cuda_get_grid_thread_id < 1, 1 > (); /* * const HYPRE_Int number_threads = * hypre_cuda_get_grid_num_threads<1,1>(); */ if (idx < length) { loop_body(idx); } } template < typename LOOP_BODY > void BoxLoopforall(HYPRE_Int length, LOOP_BODY loop_body) { HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { #ifdef HYPRE_USING_OPENMP #pragma omp parallel for HYPRE_SMP_SCHEDULE #endif for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); const dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); HYPRE_CUDA_LAUNCH(forall_kernel, gDim, bDim, loop_body, length); } } /* * ------------------------------ parforreduction-loop * ----------------------------- */ template < typename LOOP_BODY, typename REDUCER > __global__ void reductionforall_kernel(HYPRE_Int length, REDUCER reducer, LOOP_BODY loop_body) { const HYPRE_Int thread_id = hypre_cuda_get_grid_thread_id < 1, 1 > (); const HYPRE_Int n_threads = hypre_cuda_get_grid_num_threads < 1, 1 > (); for (HYPRE_Int idx = thread_id; idx < length; idx += n_threads) { loop_body(idx, reducer); } /* reduction in block-level and the save the results in reducer */ reducer.BlockReduce(); } template < typename LOOP_BODY, typename REDUCER > void ReductionBoxLoopforall(HYPRE_Int length, REDUCER & reducer, LOOP_BODY loop_body) { if (length <= 0) { return; } HYPRE_ExecutionPolicy exec_policy = hypre_HandleStructExecPolicy(hypre_handle()); if (exec_policy == HYPRE_EXEC_HOST) { for (HYPRE_Int idx = 0; idx < length; idx++) { loop_body(idx, reducer); } } else if (exec_policy == HYPRE_EXEC_DEVICE) { const dim3 bDim = hypre_GetDefaultDeviceBlockDimension(); dim3 gDim = hypre_GetDefaultDeviceGridDimension(length, "thread", bDim); /* * Note: we assume gDim cannot exceed 1024 and bDim < WARP * WARP */ gDim.x = hypre_min(gDim.x, 1024); reducer.nblocks = gDim.x; /* * hypre_printf("length= %d, blocksize = %d, gridsize = %d\n", * length, bDim.x, gDim.x); */ HYPRE_CUDA_LAUNCH(reductionforall_kernel, gDim, bDim, length, reducer, loop_body); } } #ifdef __cplusplus } #endif /* Get 1-D length of the loop, in hypre__tot */ #define hypre_newBoxLoopInit(ndim, loop_size) \ HYPRE_Int hypre__tot = 1; \ for (HYPRE_Int hypre_d = 0; hypre_d < ndim; hypre_d ++) \ { \ hypre__tot *= loop_size[hypre_d]; \ } /* Initialize struct for box-k */ #define 
hypre_BoxLoopDataDeclareK(k, ndim, loop_size, dbox, start, stride) \ hypre_Boxloop databox##k; \ /* dim 0 */ \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = start[0] - dbox->imin[0]; \ databox##k.bsize0 = dbox->imax[0] - dbox->imin[0]; \ /* dim 1 */ \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = start[1] - dbox->imin[1]; \ databox##k.bsize1 = dbox->imax[1] - dbox->imin[1]; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ /* dim 2 */ \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = start[2] - dbox->imin[2]; \ databox##k.bsize2 = dbox->imax[2] - dbox->imin[2]; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } #define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \ hypre_Boxloop databox##k; \ databox##k.lsize0 = loop_size[0]; \ databox##k.strides0 = stride[0]; \ databox##k.bstart0 = 0; \ databox##k.bsize0 = 0; \ if (ndim > 1) \ { \ databox##k.lsize1 = loop_size[1]; \ databox##k.strides1 = stride[1]; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ else \ { \ databox##k.lsize1 = 1; \ databox##k.strides1 = 0; \ databox##k.bstart1 = 0; \ databox##k.bsize1 = 0; \ } \ if (ndim == 3) \ { \ databox##k.lsize2 = loop_size[2]; \ databox##k.strides2 = stride[2]; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } \ else \ { \ databox##k.lsize2 = 1; \ databox##k.strides2 = 0; \ databox##k.bstart2 = 0; \ databox##k.bsize2 = 0; \ } /* RL: TODO loop_size out of box struct, bsize +1 */ /* Given input 1-D 'idx' in box, get 3-D 'local_idx' in loop_size */ #define hypre_newBoxLoopDeclare(box) \ hypre_Index local_idx; \ HYPRE_Int idx_local = idx; \ hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \ idx_local = idx_local / box.lsize0; \ hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \ idx_local = idx_local / box.lsize1; \ hypre_IndexD(local_idx, 2) = idx_local % box.lsize2; \ /* Given input 3-D 'local_idx', get 1-D 'hypre__i' in 'box' */ #define hypre_BoxLoopIncK(k, box, hypre__i) \ HYPRE_Int hypre_boxD##k = 1; \ HYPRE_Int hypre__i = 0; \ hypre__i += (hypre_IndexD(local_idx, 0) * box.strides0 + box.bstart0) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \ hypre__i += (hypre_IndexD(local_idx, 1) * box.strides1 + box.bstart1) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \ hypre__i += (hypre_IndexD(local_idx, 2) * box.strides2 + box.bstart2) * hypre_boxD##k; \ hypre_boxD##k *= hypre_max(0, box.bsize2 + 1); /* get 3-D local_idx into 'index' */ #define hypre_BoxLoopGetIndex(index) \ index[0] = hypre_IndexD(local_idx, 0); \ index[1] = hypre_IndexD(local_idx, 1); \ index[2] = hypre_IndexD(local_idx, 2); /* BoxLoop 0 */ #define hypre_newBoxLoop0Begin(ndim, loop_size) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_newBoxLoop0End() \ }); \ } /* BoxLoop 1 */ #define hypre_newBoxLoop1Begin(ndim, loop_size, dbox1, start1, stride1, i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_newBoxLoop1End(i1) \ }); \ } /* BoxLoop 2 */ #define 
hypre_newBoxLoop2Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_newBoxLoop2End(i1, i2) \ }); \ } /* BoxLoop 3 */ #define hypre_newBoxLoop3Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim,loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim,loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim,loop_size, dbox3, start3, stride3); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); #define hypre_newBoxLoop3End(i1, i2, i3) \ }); \ } /* BoxLoop 4 */ #define hypre_newBoxLoop4Begin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, \ dbox3, start3, stride3, i3, \ dbox4, start4, stride4, i4) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ hypre_BoxLoopDataDeclareK(3, ndim, loop_size, dbox3, start3, stride3); \ hypre_BoxLoopDataDeclareK(4, ndim, loop_size, dbox4, start4, stride4); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ hypre_BoxLoopIncK(3, databox3, i3); \ hypre_BoxLoopIncK(4, databox4, i4); #define hypre_newBoxLoop4End(i1, i2, i3, i4) \ }); \ } /* Basic BoxLoops have no boxes */ /* BoxLoop 1 */ #define zypre_newBasicBoxLoop1Begin(ndim, loop_size, stride1, i1) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); /* BoxLoop 2 */ #define zypre_newBasicBoxLoop2Begin(ndim, loop_size, stride1, i1, stride2, i2) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ zypre_BasicBoxLoopDataDeclareK(1, ndim, loop_size, stride1); \ zypre_BasicBoxLoopDataDeclareK(2, ndim, loop_size, stride2); \ BoxLoopforall(hypre__tot, HYPRE_LAMBDA (HYPRE_Int idx) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); \ /* TODO: RL just parallel-for, it should not be here, better in utilities */ #define hypre_LoopBegin(size, idx) \ { \ BoxLoopforall(size, HYPRE_LAMBDA (HYPRE_Int idx) \ { #define hypre_LoopEnd() \ }); \ } /* Reduction BoxLoop1 */ #define hypre_BoxLoop1ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); #define hypre_BoxLoop1ReductionEnd(i1, reducesum) \ }); \ } /* Reduction BoxLoop2 */ #define 
hypre_BoxLoop2ReductionBegin(ndim, loop_size, dbox1, start1, stride1, i1, \ dbox2, start2, stride2, i2, reducesum) \ { \ hypre_newBoxLoopInit(ndim, loop_size); \ hypre_BoxLoopDataDeclareK(1, ndim, loop_size, dbox1, start1, stride1); \ hypre_BoxLoopDataDeclareK(2, ndim, loop_size, dbox2, start2, stride2); \ ReductionBoxLoopforall(hypre__tot, reducesum, HYPRE_LAMBDA (HYPRE_Int idx, decltype(reducesum) &reducesum) \ { \ hypre_newBoxLoopDeclare(databox1); \ hypre_BoxLoopIncK(1, databox1, i1); \ hypre_BoxLoopIncK(2, databox2, i2); #define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \ }); \ } /* Renamings */ #define hypre_BoxLoopBlock() 0 #define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin #define hypre_BoxLoop0For hypre_newBoxLoop0For #define hypre_BoxLoop0End hypre_newBoxLoop0End #define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin #define hypre_BoxLoop1For hypre_newBoxLoop1For #define hypre_BoxLoop1End hypre_newBoxLoop1End #define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin #define hypre_BoxLoop2For hypre_newBoxLoop2For #define hypre_BoxLoop2End hypre_newBoxLoop2End #define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin #define hypre_BoxLoop3For hypre_newBoxLoop3For #define hypre_BoxLoop3End hypre_newBoxLoop3End #define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin #define hypre_BoxLoop4For hypre_newBoxLoop4For #define hypre_BoxLoop4End hypre_newBoxLoop4End #define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin #define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin #endif #endif /* #ifndef HYPRE_BOXLOOP_CUDA_HEADER */
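The Begin/End macro pairs above expand into a device lambda over the flattened loop index: hypre_newBoxLoopDeclare recovers the 3-D point by repeated mod/div on lsize0..2, and hypre_BoxLoopIncK folds it back into a 1-D data offset using the strides and box starts. A minimal caller-side sketch of the intended usage follows; it is an illustrative fragment, not compilable on its own, and ndim, loop_size, x_data_box, start, stride, and xp are hypothetical variables assumed to be set up by the caller as in hypre's struct_mv code.

/* Illustrative only: zero a struct vector over one box. The braced body
 * becomes the body of the HYPRE_LAMBDA; xi is the 1-D data offset that
 * hypre_BoxLoopIncK computes for each point of loop_size. */
hypre_BoxLoop1Begin(ndim, loop_size, x_data_box, start, stride, xi);
{
   xp[xi] = 0.0;   /* runs once per point of the box */
}
hypre_BoxLoop1End(xi);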
time_omp_fib.c
#include <stdio.h> /* for printf() */ #include <assert.h> /* for assert() */ #include <omp.h> #include <qthread/qthread.h> #include <qthread/qtimer.h> #include "argparsing.h" static aligned_t validation[] = { 0, // 0 1, // 1 1, // 2 2, // 3 3, // 4 5, // 5 8, // 6 13, // 7 21, // 8 34, // 9 55, // 10 89, // 11 144, // 12 233, // 13 377, // 14 610, // 15 987, // 16 1597, // 17 2584, // 18 4181, // 19 6765, // 20 10946, // 21 17711, // 22 28657, // 23 46368, // 24 75025, // 25 121393, // 26 196418, // 27 317811, // 28 514229, // 29 832040, // 30 1346269, // 31 2178309, // 32 3524578, // 33 5702887, // 34 9227465, // 35 14930352, // 36 24157817, // 37 39088169 // 38 }; static aligned_t fib(void *arg_) { aligned_t *n = (aligned_t *)arg_; if (*n < 2) { return *n; } aligned_t ret1, ret2; aligned_t n1 = *n - 1; aligned_t n2 = *n - 2; #pragma omp task default(none) shared(ret1,n1) ret1 = fib(&n1); #pragma omp task default(none) shared(ret2,n2) ret2 = fib(&n2); #pragma omp taskwait return ret1 + ret2; } int main(int argc, char *argv[]) { qtimer_t timer = qtimer_create(); aligned_t n = 20; aligned_t ret = 0; int threads = 1; /* setup */ CHECK_VERBOSE(); NUMARG(n, "FIB_INPUT"); #pragma omp parallel #pragma omp single { threads = omp_get_num_threads(); qtimer_start(timer); #pragma omp task default(none) shared(ret,n) ret = fib(&n); #pragma omp taskwait qtimer_stop(timer); } if (validation[n] == ret) { fprintf(stdout, "%d %lu %lu %f\n", threads, (unsigned long)n, (unsigned long)ret, qtimer_secs(timer)); } else { iprintf("Fail %lu (== %lu) in %f sec\n", (unsigned long)ret, (unsigned long)validation[n], qtimer_secs(timer)); } qtimer_destroy(timer); return 0; } /* vim:set expandtab */
#include <stdio.h> /* for printf() */ #include <assert.h> /* for assert() */ #include <omp.h> #include <qthread/qthread.h> #include <qthread/qtimer.h> #include "argparsing.h" static aligned_t validation[] = { 0, //0 1, //1 1, //2 2, //3 3, //4 5, //5 8, //6 13, //7 21, //8 34, //9 55, //10 89, //11 144, //12 233, //13 377, //14 610, //15 987, //16 1597, //17 2584, //18 4181, //19 6765, //20 10946, //21 17711, //22 28657, //23 46368, //24 75025, //25 121393, //26 196418, //27 317811, //28 514229, //29 832040, //30 1346269, //31 2178309, //32 3524578, //33 5702887, //34 9227465, //35 14930352, //36 24157817, //37 39088169 // 38 }; static aligned_t fib(void *arg_) { aligned_t *n = (aligned_t *) arg_; if (*n < 2) { return *n; } aligned_t ret1, ret2; aligned_t n1 = *n - 1; aligned_t n2 = *n - 2; ret1 = fib(&n1); ret2 = fib(&n2); return ret1 + ret2; } int main(int argc, char *argv[]) { qtimer_t timer = qtimer_create(); aligned_t n = 20; aligned_t ret = 0; int threads = 1; /* setup */ CHECK_VERBOSE(); NUMARG(n, "FIB_INPUT"); threads = omp_get_num_threads(); qtimer_start(timer); ret = fib(&n); qtimer_stop(timer); if (validation[n] == ret) { fprintf(stdout, "%d %lu %lu %f\n", threads, (unsigned long)n, (unsigned long)ret, qtimer_secs(timer)); } else { iprintf("Fail %lu (== %lu) in %f sec\n", (unsigned long)ret, (unsigned long)validation[n], qtimer_secs(timer)); } qtimer_destroy(timer); return 0; } /* vim:set expandtab */
#include <stdio.h> /* for printf() */ #include <assert.h> /* for assert() */ #include <omp.h> #include <qthread/qthread.h> #include <qthread/qtimer.h> #include "argparsing.h" static aligned_t validation[] = { 0, //0 1, //1 1, //2 2, //3 3, //4 5, //5 8, //6 13, //7 21, //8 34, //9 55, //10 89, //11 144, //12 233, //13 377, //14 610, //15 987, //16 1597, //17 2584, //18 4181, //19 6765, //20 10946, //21 17711, //22 28657, //23 46368, //24 75025, //25 121393, //26 196418, //27 317811, //28 514229, //29 832040, //30 1346269, //31 2178309, //32 3524578, //33 5702887, //34 9227465, //35 14930352, //36 24157817, //37 39088169 // 38 }; static aligned_t fib(void *arg_) { aligned_t *n = (aligned_t *) arg_; if (*n < 2) { return *n; } aligned_t ret1, ret2; aligned_t n1 = *n - 1; aligned_t n2 = *n - 2; #pragma omp task default(none) shared(ret1,n1) ret1 = fib(&n1); #pragma omp task default(none) shared(ret2,n2) ret2 = fib(&n2); #pragma omp taskwait return ret1 + ret2; } int main(int argc, char *argv[]) { qtimer_t timer = qtimer_create(); aligned_t n = 20; aligned_t ret = 0; int threads = 1; /* setup */ CHECK_VERBOSE(); NUMARG(n, "FIB_INPUT"); #pragma omp parallel #pragma omp single { threads = omp_get_num_threads(); qtimer_start(timer); #pragma omp task default(none) shared(ret,n) ret = fib(&n); #pragma omp taskwait qtimer_stop(timer); } if (validation[n] == ret) { fprintf(stdout, "%d %lu %lu %f\n", threads, (unsigned long)n, (unsigned long)ret, qtimer_secs(timer)); } else { iprintf("Fail %lu (== %lu) in %f sec\n", (unsigned long)ret, (unsigned long)validation[n], qtimer_secs(timer)); } qtimer_destroy(timer); return 0; } /* vim:set expandtab */
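For readers without the qthread/qtimer headers, the tasking pattern in the OpenMP variant above (a parallel/single region spawning a recursive task tree joined by taskwait) can be reproduced in a self-contained program. This is a minimal sketch of that pattern, not part of the benchmark itself; the compile flag (e.g. cc -fopenmp) is compiler-dependent.

#include <stdio.h>

/* Recursive Fibonacci: each call spawns two child tasks and waits for
 * both before combining their results. */
static unsigned long fib(unsigned long n)
{
    if (n < 2) return n;
    unsigned long r1, r2;
    #pragma omp task shared(r1) firstprivate(n)
    r1 = fib(n - 1);
    #pragma omp task shared(r2) firstprivate(n)
    r2 = fib(n - 2);
    #pragma omp taskwait         /* join both children */
    return r1 + r2;
}

int main(void)
{
    unsigned long n = 20, ret = 0;
    /* One thread enters fib() and seeds the task tree; the rest of the
     * team executes the spawned tasks. */
    #pragma omp parallel shared(ret)
    #pragma omp single
    ret = fib(n);
    printf("fib(%lu) = %lu\n", n, ret);
    return 0;
}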
GB_binop__eq_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__eq_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) 
; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__eq_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__eq_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__eq_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__eq_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
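/* A minimal standalone model of the bind1st/bind2nd pattern above, for reference only; the helper below is ours, not SuiteSparse:GraphBLAS API. Both kernels stream over the value array, skip entries the optional bitmap marks absent (the GBB test), and apply z = (x == y) with one operand pinned to a scalar, here written serially without the OpenMP and GB_DISABLE plumbing. */
#include <stdbool.h>
#include <stdint.h>
static void eq_uint32_bind2nd_sketch (bool *Cx, const uint32_t *Ax,
    const int8_t *Ab, int64_t anz, uint32_t y)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab && !Ab [p]) continue ;   /* GBB(Ab,p): entry absent in bitmap */
        Cx [p] = (Ax [p] == y) ;        /* cij = (aij == y), no typecast */
    }
}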
wino_conv_kernel_x86.c
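/* Overview (a summary sketch drawn from the stage comments in this file): the code below computes 3x3 stride-1 convolution via Winograd F(4,3). Each 6x6 input tile d is transformed as V = B^T * d * B, multiplied elementwise against the pre-transformed kernel U = G * g * G^T (built once by conv3x3s1_winograd43_transform_kernel_sse from the 3x3 kernel g), and the 4x4 output tile is recovered as Y = A^T * (U .* V) * A. The constant matrices B^T ("BT"), G ("G") and A^T ("AT") appear in the comments of the BEGIN/END-marked stages below. */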
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <string.h> /* memcpy/memset */ #include "wino_conv_kernel_x86.h" #define TILE 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? (a) : (b)) static void relu(float* data, int size, int activation) { for (int i = 0; i < size; i++) { data[i] = WINO_MAX(data[i], ( float )0); if (activation > 0) { data[i] = WINO_MIN(data[i], ( float )activation); } } } static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param) { int output_c = filter->dims[0]; int input_c = filter->dims[1]; int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float); return trans_ker_size + 128; // caution } static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float)); } } // pad 0 in right and down side on 3D void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w); } } static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float)); } } // delete 0 in right and down side on 3D void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w); } } void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align
/ 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; 
_t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float 
d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(num_thread) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* 
kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = 
_mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; 
output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 4; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN 
transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outh) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform
kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p + 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 
4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } sys_free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2; int padded_inw = TILE * block_w + 2; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2];
int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }
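/* A small worked check of the tiling arithmetic used by wino_conv_hcl_prerun above; sketch only, the helper below is ours and not part of the kernel API. F(4,3) produces TILE x TILE (4x4) output tiles from 6x6 input tiles that overlap by 2 pixels, so the padded input needs TILE * block + 2 rows/cols: e.g. out_h = 7 -> block_h = ceil(7/4) = 2 and padded_inh = 4*2 + 2 = 10. */
#include <assert.h>
static void wino_tile_arith_check(void)
{
    const int tile = 4;                       /* mirrors #define TILE 4 above */
    int out_h = 7;
    int block_h = (out_h + tile - 1) / tile;  /* ceil(out_h / TILE)           */
    int padded_inh = tile * block_h + 2;      /* rows incl. the 2-pixel rim   */
    assert(block_h == 2 && padded_inh == 10);
}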
/* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <string.h> /* memcpy/memset */ #include "wino_conv_kernel_x86.h" #define TILE 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? (a) : (b)) static void relu(float* data, int size, int activation) { for (int i = 0; i < size; i++) { data[i] = WINO_MAX(data[i], ( float )0); if (activation > 0) { data[i] = WINO_MIN(data[i], ( float )activation); } } } static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param) { int output_c = filter->dims[0]; int input_c = filter->dims[1]; int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float); return trans_ker_size + 128; // caution } static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float)); } } // pad 0 in right and down side on 3D void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w); } } static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float)); } } // delete 0 in right and down side on 3D void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w); } } void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = 
_w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = 
w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { __m128 _r0 = 
_mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = 
_mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if 
__AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 4; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align 
* p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outh) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p + 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch *
36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = 
sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2; int padded_inw = TILE * block_w + 2; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * 
padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }
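/* Illustrative sketch (not part of the file above; every name below is local to this
   example): a self-contained check that the F(4,3) transform matrices quoted in the
   BT / G (ktm) / AT comments satisfy Y = A_T * ((G g G_T) .* (B_T d B)) * A = direct
   3x3 convolution on one 6x6 tile, which is the identity conv3x3s1_winograd43_sse
   relies on. */
#include <stdio.h>
#include <math.h>

/* C(m x n) = A(m x k) * B(k x n), row-major dense matmul */
static void matmul(const float* A, const float* B, float* C, int m, int k, int n)
{
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < n; j++)
        {
            float s = 0.f;
            for (int t = 0; t < k; t++)
                s += A[i * k + t] * B[t * n + j];
            C[i * n + j] = s;
        }
    }
}

int main(void)
{
    /* B_T, G and A_T exactly as quoted in the comments of the file above */
    const float BT[36] = {4, 0, -5, 0, 1, 0, 0, -4, -4, 1, 1, 0, 0, 4, -4, -1, 1, 0,
                          0, -2, -1, 2, 1, 0, 0, 2, -1, -2, 1, 0, 0, 4, 0, -5, 0, 1};
    const float G[18] = {1.f / 4, 0.f, 0.f, -1.f / 6, -1.f / 6, -1.f / 6, -1.f / 6, 1.f / 6, -1.f / 6,
                         1.f / 24, 1.f / 12, 1.f / 6, 1.f / 24, -1.f / 12, 1.f / 6, 0.f, 0.f, 1.f};
    const float AT[24] = {1, 1, 1, 1, 1, 0, 0, 1, -1, 2, -2, 0, 0, 1, 1, 4, 4, 0, 0, 1, -1, 8, -8, 1};

    float d[36], g[9]; /* one 6x6 input tile and one 3x3 kernel */
    for (int i = 0; i < 36; i++) d[i] = (float)(i % 7) - 3.f;
    for (int i = 0; i < 9; i++) g[i] = 0.5f * (float)(i % 4) - 0.75f;

    /* build the transposes B, G_T, A */
    float B[36], GT[18], A[24];
    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 6; j++) B[j * 6 + i] = BT[i * 6 + j];
    for (int i = 0; i < 6; i++)
        for (int j = 0; j < 3; j++) GT[j * 6 + i] = G[i * 3 + j];
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 6; j++) A[j * 4 + i] = AT[i * 6 + j];

    /* U = G g G_T (kernel transform), V = B_T d B (input transform) */
    float t6x3[18], U[36], t6x6[36], V[36], M[36], t4x6[24], Y[16];
    matmul(G, g, t6x3, 6, 3, 3);
    matmul(t6x3, GT, U, 6, 3, 6);
    matmul(BT, d, t6x6, 6, 6, 6);
    matmul(t6x6, B, V, 6, 6, 6);
    for (int i = 0; i < 36; i++) M[i] = U[i] * V[i]; /* element-wise product */
    matmul(AT, M, t4x6, 4, 6, 6);                    /* output transform ... */
    matmul(t4x6, A, Y, 4, 6, 4);                     /* ... Y = A_T M A (4x4) */

    /* reference: direct valid 3x3 convolution (stride 1) of the same tile */
    float worst = 0.f;
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
        {
            float s = 0.f;
            for (int u = 0; u < 3; u++)
                for (int v = 0; v < 3; v++)
                    s += d[(i + u) * 6 + (j + v)] * g[u * 3 + v];
            float e = fabsf(s - Y[i * 4 + j]);
            worst = e > worst ? e : worst;
        }
    printf("max |direct - winograd| = %g\n", worst); /* expect ~0 (float round-off) */
    return 0;
}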
/* * Copyright (c) 2020, OPEN AI LAB * Author: haoluo@openailab.com */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include "wino_conv_kernel_x86.h" #define TILE 4 #define ELEM_SIZE ((TILE + 2) * (TILE + 2)) #define WINO_MAX(a, b) ((a) > (b) ? (a) : (b)) #define WINO_MIN(a, b) ((a) < (b) ? (a) : (b)) static void relu(float* data, int size, int activation) { for (int i = 0; i < size; i++) { data[i] = WINO_MAX(data[i], ( float )0); if (activation > 0) { data[i] = WINO_MIN(data[i], ( float )activation); } } } static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param) { int output_c = filter->dims[0]; int input_c = filter->dims[1]; int trans_ker_size = output_c * input_c * ELEM_SIZE * sizeof(float); return trans_ker_size + 128; // caution } static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float)); } } // pad 0 in right and down side on 3D void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w); } } static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, m * n * sizeof(float)); return; } for (i = 0; i < m; ++i) { memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float)); } } // pad 0 in right and down side on 3D void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w) { int i; if (n >= n_align && m >= m_align) { memcpy(dst, src, c * m * n * sizeof(float)); return; } for (i = 0; i < c; ++i) { delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w); } } void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 
// 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = _w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = 
_w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; 
t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(num_thread) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 
= _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = 
_mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16; const float* r0 
= bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 4; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < 
outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outh) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc(6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p + 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch
* 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* 
)filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2; int padded_inw = TILE * block_w + 2; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc(batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc(outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, 
padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }
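/* Illustrative driver (a hedged sketch, separate from the file above): exercises
   pad_0_align_3D and delete_0_3D exactly as defined above, padding one 2x3
   single-channel map into a zeroed 4x5 buffer and cropping it back. Note that
   pad_0_align_3D only copies the source rows, so the caller must zero the
   destination first, just as wino_conv_hcl_prerun does with memset. */
#include <stdio.h>
#include <string.h>

void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w);
void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w);

int main(void)
{
    float src[6] = {1, 2, 3, 4, 5, 6}; /* 2 x 3, one channel */
    float padded[20];                  /* 4 x 5 aligned buffer */
    float back[6];
    memset(padded, 0, sizeof(padded)); /* pad_0_align_3D does not zero-fill */
    pad_0_align_3D(padded, src, 2, 3, 4, 5, 1, 0, 0);
    delete_0_3D(back, padded, 4, 5, 2, 3, 1, 0, 0);
    for (int i = 0; i < 4; i++)
    {
        for (int j = 0; j < 5; j++)
            printf("%4.0f", padded[i * 5 + j]);
        printf("\n"); /* rows: 1 2 3 0 0 / 4 5 6 0 0 / zeros / zeros */
    }
    printf("round trip ok: %d\n", memcmp(back, src, sizeof(src)) == 0);
    return 0;
}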
GB_unjumbled_template.c
//------------------------------------------------------------------------------ // GB_unjumble_template: unjumble the vectors of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { //---------------------------------------------------------------------- // get the task description //---------------------------------------------------------------------- int64_t kfirst = A_slice [tid] ; int64_t klast = A_slice [tid+1] ; //---------------------------------------------------------------------- // sort vectors kfirst to klast //---------------------------------------------------------------------- for (int64_t k = kfirst ; k < klast ; k++) { //------------------------------------------------------------------ // check if the vector needs sorting //------------------------------------------------------------------ bool jumbled = false ; int64_t pA_start = Ap [k] ; int64_t pA_end = Ap [k+1] ; int64_t ilast = -1 ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { int64_t i = Ai [pA] ; if (i < ilast) { jumbled = true ; break ; } ilast = i ; } //------------------------------------------------------------------ // sort the vector //------------------------------------------------------------------ if (jumbled) { int64_t aknz = pA_end - pA_start ; GB_QSORT_WORKER ; } } } } #undef GB_QSORT_WORKER
//------------------------------------------------------------------------------ // GB_unjumble_template: unjumble the vectors of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ { int tid; for (tid = 0; tid < ntasks; tid++) { //---------------------------------------------------------------------- // get the task description //---------------------------------------------------------------------- int64_t kfirst = A_slice[tid]; int64_t klast = A_slice[tid + 1]; //---------------------------------------------------------------------- // sort vectors kfirst to klast //---------------------------------------------------------------------- for (int64_t k = kfirst; k < klast; k++) { //------------------------------------------------------------------ // check if the vector needs sorting //------------------------------------------------------------------ bool jumbled = false; int64_t pA_start = Ap[k]; int64_t pA_end = Ap[k + 1]; int64_t ilast = -1; for (int64_t pA = pA_start; pA < pA_end; pA++) { int64_t i = Ai[pA]; if (i < ilast) { jumbled = true; break; } ilast = i; } //------------------------------------------------------------------ // sort the vector //------------------------------------------------------------------ if (jumbled) { int64_t aknz = pA_end - pA_start; GB_QSORT_WORKER; } } } } #undef GB_QSORT_WORKER
//------------------------------------------------------------------------------
// GB_unjumble_template: unjumble the vectors of a matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

{
    int tid;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0; tid < ntasks; tid++)
    {
        //----------------------------------------------------------------------
        // get the task description
        //----------------------------------------------------------------------

        int64_t kfirst = A_slice[tid];
        int64_t klast = A_slice[tid + 1];

        //----------------------------------------------------------------------
        // sort vectors kfirst to klast
        //----------------------------------------------------------------------

        for (int64_t k = kfirst; k < klast; k++)
        {
            //------------------------------------------------------------------
            // check if the vector needs sorting
            //------------------------------------------------------------------

            bool jumbled = false;
            int64_t pA_start = Ap[k];
            int64_t pA_end = Ap[k + 1];
            int64_t ilast = -1;
            for (int64_t pA = pA_start; pA < pA_end; pA++)
            {
                int64_t i = Ai[pA];
                if (i < ilast)
                {
                    jumbled = true;
                    break;
                }
                ilast = i;
            }

            //------------------------------------------------------------------
            // sort the vector
            //------------------------------------------------------------------

            if (jumbled)
            {
                int64_t aknz = pA_end - pA_start;
                GB_QSORT_WORKER;
            }
        }
    }
}
#undef GB_QSORT_WORKER
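The only difference between the two formatted variants is the #pragma omp parallel for. schedule(dynamic,1) suits this loop because task cost is data-dependent: a task whose vectors are already sorted pays only for the linear jumbled check, while one with jumbled vectors also pays for the sorts. A plausible sketch of how the A_slice boundaries could be built, splitting evenly by vector count (GraphBLAS itself balances by entry count, which this does not attempt):

#include <stdint.h>

/* Illustrative only: split vectors 0..nvec-1 into ntasks contiguous ranges,
   so that task tid owns vectors A_slice[tid] .. A_slice[tid+1]-1. */
void build_slices(int64_t *A_slice, int64_t nvec, int ntasks)
{
    int tid;
    for (tid = 0; tid <= ntasks; tid++)
    {
        A_slice[tid] = (nvec * tid) / ntasks;
    }
}

With such a slicing, a few tasks may own all the jumbled vectors, which is exactly the imbalance the dynamic schedule absorbs.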
par_csr_matrix.c
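Before the source: the functions in this file all rely on hypre's split storage, where each rank owns a contiguous block of rows and keeps it as two sequential CSR matrices, diag (columns inside the rank's own column range) and offd (all other columns, compressed through col_map_offd). A minimal sketch of that bookkeeping under the length-2 row_starts/col_starts convention described below; all names here are illustrative, not hypre API:

#include <stdint.h>

/* Illustrative only: local extents from hypre's length-2 starts arrays,
   starts[0] = first global row/col of this rank, starts[1] = first of the
   next rank. */
typedef struct { int64_t starts[2]; } local_range;

static int64_t local_size(const local_range *r)
{ return r->starts[1] - r->starts[0]; }

/* A global column j belongs in diag iff it falls inside this rank's column
   range; otherwise it goes to offd and is renumbered via col_map_offd. */
static int in_diag(const local_range *cols, int64_t j)
{ return j >= cols->starts[0] && j < cols->starts[1]; }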
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_ParCSRMatrix class.
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"
#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"

/* In addition to the publicly accessible interface in HYPRE_mv.h, the
   implementation in this file uses accessor macros into the sequential matrix
   structure, and so includes the .h that defines that structure. Should those
   accessor functions become proper functions at some later date, this will not
   be necessary. AJC 4/99 */

HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*,
                                           MPI_Comm, void**, HYPRE_Int*);

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCreate
 *--------------------------------------------------------------------------*/

/* If create is called and row_starts and col_starts are NOT null, then it is
   assumed that they are of length 2 containing the start row of the calling
   processor followed by the start row of the next processor - AHB 6/05 */

hypre_ParCSRMatrix*
hypre_ParCSRMatrixCreate( MPI_Comm comm,
                          HYPRE_BigInt global_num_rows,
                          HYPRE_BigInt global_num_cols,
                          HYPRE_BigInt *row_starts,
                          HYPRE_BigInt *col_starts,
                          HYPRE_Int num_cols_offd,
                          HYPRE_Int num_nonzeros_diag,
                          HYPRE_Int num_nonzeros_offd )
{
   hypre_ParCSRMatrix *matrix;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt first_row_index, first_col_diag;

   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   if (!row_starts)
   {
      hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id,
                                      &row_starts);
   }
   if (!col_starts)
   {
      if (global_num_rows == global_num_cols)
      {
         col_starts = row_starts;
      }
      else
      {
         hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id,
                                         &col_starts);
      }
   }

   /* row_starts[0] is start of local rows.
row_starts[1] is start of next processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1]-first_row_index ; first_col_diag = col_starts[0]; local_num_cols = col_starts[1]-first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rows; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* We could make these null instead of leaving the range. If that change is made, then when this create is called from functions like the matrix-matrix multiply, be careful not to generate a new partition. */ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix ) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if ( hypre_ParCSRMatrixOwnsData(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if ( hypre_ParCSRMatrixDiagT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if ( hypre_ParCSRMatrixOffdT(matrix) ) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), 
HYPRE_MEMORY_HOST); } if ( hypre_ParCSRMatrixOwnsColStarts(matrix) ) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* RL: this is actually not correct since the memory_location may have been changed after allocation * put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) ) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if ( hypre_ParCSRMatrixProcOrdering(matrix) ) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_MemoryLocation memory_location ) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix ) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix* hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) ); /* !!! 
S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix* hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix *A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if ( hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location) ) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core( hypre_ParCSRMatrix *matrix, const char* format ) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/
   hypre_CSRMatrixCheckSetNumNonzeros(diag);
   hypre_CSRMatrixCheckSetNumNonzeros(offd);

   if (format[0] == 'I')
   {
      HYPRE_BigInt total_num_nonzeros;
      HYPRE_BigInt local_num_nonzeros;
      local_num_nonzeros = (HYPRE_BigInt) ( hypre_CSRMatrixNumNonzeros(diag) +
                                            hypre_CSRMatrixNumNonzeros(offd) );
      hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1,
                          HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
      hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
   }
   else if (format[0] == 'D')
   {
      HYPRE_Real total_num_nonzeros;
      HYPRE_Real local_num_nonzeros;
      local_num_nonzeros = (HYPRE_Real) ( hypre_CSRMatrixNumNonzeros(diag) +
                                          hypre_CSRMatrixNumNonzeros(offd) );
      hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1,
                          HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
      hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
   }
   else
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetNumNonzeros
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDNumNonzeros
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetNumRownnz
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetNumRownnz( hypre_ParCSRMatrix *matrix )
{
   MPI_Comm comm;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int *rownnz_diag;
   HYPRE_Int *rownnz_offd;
   HYPRE_Int num_rownnz_diag;
   HYPRE_Int num_rownnz_offd;
   HYPRE_BigInt local_num_rownnz;
   HYPRE_BigInt global_num_rownnz;
   HYPRE_Int i, j;

   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm = hypre_ParCSRMatrixComm(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);
   rownnz_diag = hypre_CSRMatrixRownnz(diag);
   rownnz_offd = hypre_CSRMatrixRownnz(offd);
   num_rownnz_diag = hypre_CSRMatrixNumRownnz(diag);
   num_rownnz_offd = hypre_CSRMatrixNumRownnz(offd);

   /* count the union of the two sorted rownnz lists; a row that has entries
      in both diag and offd must be counted only once */
   local_num_rownnz = i = j = 0;
   while (i < num_rownnz_diag && j < num_rownnz_offd)
   {
      local_num_rownnz++;
      if (rownnz_diag[i] < rownnz_offd[j])
      {
         i++;
      }
      else if (rownnz_diag[i] > rownnz_offd[j])
      {
         j++;
      }
      else
      {
         i++;
         j++;
      }
   }
   local_num_rownnz += (HYPRE_BigInt) ((num_rownnz_diag - i) +
                                       (num_rownnz_offd - j));

   hypre_MPI_Allreduce(&local_num_rownnz, &global_num_rownnz, 1,
                       HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);

   hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rownnz;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDataOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
                                HYPRE_Int owns_data )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_ParCSRMatrixOwnsData(matrix) = owns_data;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetRowStartsOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_row_starts )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetColStartsOwner
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_col_starts )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;
   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixRead
 *--------------------------------------------------------------------------*/

hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm,
                        const char *file_name )
{
   hypre_ParCSRMatrix *matrix;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   HYPRE_BigInt global_num_rows, global_num_cols;
   HYPRE_Int num_cols_offd;
   HYPRE_Int local_num_rows;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_BigInt *col_map_offd;
   FILE *fp;
   HYPRE_Int equal = 1;
   HYPRE_BigInt row_s, row_e, col_s, col_e;

   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);

   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id);
   hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id);
   hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id);

   fp = fopen(new_file_info, "r");
   hypre_fscanf(fp, "%b", &global_num_rows);
   hypre_fscanf(fp, "%b", &global_num_cols);
   hypre_fscanf(fp, "%d", &num_cols_offd);
   /* the bgl input file should only contain the EXACT range for local processor */
   hypre_fscanf(fp, "%b %b %b %b", &row_s, &row_e, &col_s, &col_e);
   row_starts[0] = row_s;
   row_starts[1] = row_e;
   col_starts[0] = col_s;
   col_starts[1] = col_e;

   col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
   for (i = 0; i < num_cols_offd; i++)
   {
      hypre_fscanf(fp, "%b", &col_map_offd[i]);
   }
   fclose(fp);

   for (i = 1; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }
   if (equal)
   {
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
      col_starts = row_starts;
   }

   diag = hypre_CSRMatrixRead(new_file_d);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   if (num_cols_offd)
   {
      offd = hypre_CSRMatrixRead(new_file_o);
   }
   else
   {
      offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0);
      hypre_CSRMatrixInitialize(offd);
   }

   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_ParCSRMatrixComm(matrix) = comm;
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
   hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;
   hypre_ParCSRMatrixCommPkg(matrix) = NULL;

   /* set defaults */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   if (row_starts == col_starts)
   {
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   }
   hypre_ParCSRMatrixDiag(matrix) = diag;
   hypre_ParCSRMatrixOffd(matrix) = offd;
   if (num_cols_offd)
   {
      hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
   }
   else
{ hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix, const char *file_name ) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id); hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id); hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id); hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i=0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename ) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open 
output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0]+(HYPRE_BigInt)base_i; iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1; jlower = col_starts[0]+(HYPRE_BigInt)base_j; jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt)(i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i+1]; j++) { J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j); if ( diag_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if ( num_nonzeros_offd ) { for (j = offd_i[i]; j < offd_i[i+1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j; if ( offd_data ) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J ); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ( MPI_Comm comm, const char *filename, HYPRE_Int *base_i_ptr, HYPRE_Int *base_j_ptr, hypre_ParCSRMatrix **matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename,"%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int)row_starts[0]; base_j = (HYPRE_Int)col_starts[0]; equal = 1; for (i = 0; i <= 
num_procs; i++)
   {
      row_starts[i] -= big_base_i;
      col_starts[i] -= big_base_j;
      if (row_starts[i] != col_starts[i]) equal = 0;
   }
   if (equal)
   {
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
      col_starts = row_starts;
   }
   matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                     row_starts, col_starts, num_cols_offd,
                                     num_nonzeros_diag, num_nonzeros_offd);
   hypre_ParCSRMatrixInitialize(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);
   diag_data = hypre_CSRMatrixData(diag);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   offd_i = hypre_CSRMatrixI(offd);
   if (num_nonzeros_offd)
   {
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);
      tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
   }
   first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
   first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
   last_col_diag = first_col_diag + (HYPRE_BigInt)num_cols - 1;
   diag_cnt = 0;
   offd_cnt = 0;
   row_cnt = 0;
   for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++)
   {
      /* read values */
      hypre_fscanf(file, "%b %b %le", &I, &J, &data);
      i2 = (HYPRE_Int)(I - big_base_i - first_row_index);
      J -= big_base_j;
      if (i2 > row_cnt)
      {
         diag_i[i2] = diag_cnt;
         offd_i[i2] = offd_cnt;
         row_cnt++;
      }
      if (J < first_col_diag || J > last_col_diag)
      {
         tmp_j[offd_cnt] = J;
         offd_data[offd_cnt++] = data;
      }
      else
      {
         diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag);
         diag_data[diag_cnt++] = data;
      }
   }
   diag_i[num_rows] = diag_cnt;
   offd_i[num_rows] = offd_cnt;
   fclose(file);

   /* generate col_map_offd: tmp_j holds the global column indices of the
      offd entries, in the order they were read */
   if (num_nonzeros_offd)
   {
      aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_nonzeros_offd; i++)
      {
         aux_offd_j[i] = tmp_j[i];
      }
      hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
      col_map_offd[0] = aux_offd_j[0];
      offd_cnt = 0;
      for (i = 1; i < num_nonzeros_offd; i++)
      {
         if (aux_offd_j[i] > col_map_offd[offd_cnt])
         {
            col_map_offd[++offd_cnt] = aux_offd_j[i];
         }
      }
      for (i = 0; i < num_nonzeros_offd; i++)
      {
         offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd);
      }
      hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
   }

   /* move diagonal element in first position in each row */
   for (i = 0; i < num_rows; i++)
   {
      i_col = diag_i[i];
      for (j = i_col; j < diag_i[i+1]; j++)
      {
         if (diag_j[j] == i)
         {
            diag_j[j] = diag_j[i_col];
            data = diag_data[j];
            diag_data[j] = diag_data[i_col];
            diag_data[i_col] = data;
            diag_j[i_col] = i;
            break;
         }
      }
   }
   *base_i_ptr = base_i;
   *base_j_ptr = base_j;
   *matrix_ptr = matrix;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixGetLocalRange
 * returns the row numbers of the rows stored on this processor.
 * "End" is actually the row number of the last row on this processor.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix, HYPRE_BigInt *row_start, HYPRE_BigInt *row_end, HYPRE_BigInt *col_start, HYPRE_BigInt *col_end ) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id ); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return(-1); } hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id ); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return(-1); } /* if buffer is not allocated and some information is requested, allocate buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values )) { /* allocate enough space to hold information from the longest row. 
*/ HYPRE_Int max = 1,tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for ( i = 0; i < m; i++ ) { tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int)(row-row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow+1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] ); nzB = hypre_CSRMatrixI(Ba)[lrow+1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] ); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* Sort by increasing column numbers, assuming A and B already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for ( i = 0; i < nzA; i++ ) { v_p[imark+i] = vworkA[i]; } for ( i = imark; i < nzB; i++ ) { v_p[nzA+i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for ( i = 0; i < imark; i++ ) { idx_p[i] = cmap[cworkB[i]]; } } else { for ( i = 0; i < nzB; i++ ) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for ( i = 0; i < nzA; i++ ) { idx_p[imark+i] = cstart + cworkA[i]; } for ( i = imark; i < nzB; i++ ) { idx_p[nzA+i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(mat) ); if (exec == HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . 
* *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm, hypre_CSRMatrix *A, HYPRE_BigInt *global_row_starts, HYPRE_BigInt *global_col_starts ) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2*(num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* contains code for what to expect, if 0: global_row_starts = global_col_starts, only global_row_starts given if 1: only global_row_starts given, global_col_starts = NULL if 2: both global_row_starts and global_col_starts given if 3: only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2*(num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i+num_procs+5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i+4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if ((global_data[3] == 0) || 
(global_data[3] == 1))
      {
         row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
         if (global_data[3] == 0)
         {
            col_starts = row_starts;
         }
      }
      else
      {
         col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
         send_start = 4;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm);
         send_start = 5;
         hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT,
                           &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm);
      }
   }
   hypre_TFree(global_data, HYPRE_MEMORY_HOST);

   // Create ParCSR matrix
   parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                       row_starts, col_starts, 0, 0, 0);

   // Allocate memory for building ParCSR matrix
   num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);

   if (my_id == 0)
   {
      if (!global_row_starts)
      {
         hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts);
         free_global_row_starts = 1;
      }
      if (!global_col_starts)
      {
         hypre_GeneratePartitioning(global_num_cols, num_procs, &global_col_starts);
         free_global_col_starts = 1;
      }
      for (i = 0; i < num_procs; i++)
      {
         num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i+1] - global_row_starts[i]);
         num_nonzeros_proc[i] = A_i[(HYPRE_Int)global_row_starts[i+1]] -
                                A_i[(HYPRE_Int)global_row_starts[i]];
      }
      //num_nonzeros_proc[num_procs-1] = A_i[(HYPRE_Int)global_num_rows] - A_i[(HYPRE_Int)row_starts[num_procs-1]];
   }
   hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm);
   hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm);

   /* RL: this is not correct: (HYPRE_Int) global_num_cols */
   local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros);

   csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST);
      for (i = 1; i < num_procs; i++)
      {
         ind = A_i[(HYPRE_Int) global_row_starts[i]];
         hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i],
                                         &A_data[ind],
                                         &A_i[(HYPRE_Int) global_row_starts[i]],
                                         &A_j[ind],
                                         &csr_matrix_datatypes[i]);
         hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
                         &requests[i-1]);
         hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
      }
      hypre_CSRMatrixData(local_A) = A_data;
      hypre_CSRMatrixI(local_A) = A_i;
      hypre_CSRMatrixJ(local_A) = A_j;
      hypre_CSRMatrixOwnsData(local_A) = 0;
      hypre_MPI_Waitall(num_procs-1, requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST);
      hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST);
      if (free_global_row_starts)
      {
         hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST);
      }
      if (free_global_col_starts)
      {
         hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST);
      }
   }
   else
   {
      hypre_CSRMatrixInitialize(local_A);
      hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows,
                                      hypre_CSRMatrixData(local_A),
                                      hypre_CSRMatrixI(local_A),
                                      hypre_CSRMatrixJ(local_A),
                                      &csr_matrix_datatypes[0]);
      hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 0, 0, comm, &status0);
hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix *A, hypre_ParCSRMatrix *matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /*RL: XXX FIXME if A spans global column space, the following a_j should be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i=0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i=0; i < num_rows; i++) { for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++) { if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i=0; i < num_nonzeros; i++) { diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = 
hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_rows+1; i++)
      {
         diag_i[i] = a_i[i];
         offd_i[i] = 0;
      }
      hypre_CSRMatrixNumCols(offd) = 0;
      hypre_CSRMatrixI(offd) = offd_i;
   }
   return hypre_error_flag;
}

hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix);
   hypre_CSRMatrix *matrix;
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
   HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag);
   HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd);
   HYPRE_Int *matrix_i;
   HYPRE_BigInt *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int num_nonzeros, i, j;
   HYPRE_Int count;
   HYPRE_Int size, rest, num_threads, ii;
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix);

   num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
   matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
   hypre_CSRMatrixMemoryLocation(matrix) = memory_location;
   hypre_CSRMatrixBigInitialize(matrix);
   matrix_i = hypre_CSRMatrixI(matrix);
   matrix_j = hypre_CSRMatrixBigJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);
   num_threads = hypre_NumThreads();
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
   for (ii = 0; ii < num_threads; ii++)
   {
      HYPRE_Int ns, ne;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      count = diag_i[ns] + offd_i[ns];
      for (i = ns; i < ne; i++)
      {
         matrix_i[i] = count;
         for (j = diag_i[i]; j < diag_i[i+1]; j++)
         {
            matrix_data[count] = diag_data[j];
            matrix_j[count++] = (HYPRE_BigInt)diag_j[j] + first_col_diag;
         }
         for (j = offd_i[i]; j < offd_i[i+1]; j++)
         {
            matrix_data[count] = offd_data[j];
            matrix_j[count++] = col_map_offd[offd_j[j]];
         }
      }
   } /* end parallel region */
   matrix_i[num_rows] = num_nonzeros;

   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixToCSRMatrixAll:
 * generates a CSRMatrix from a ParCSRMatrix on all processors that have
 * parts of the ParCSRMatrix
 * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
   hypre_CSRMatrix *matrix;
   hypre_CSRMatrix *local_matrix;
   HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix);
   HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_Int *matrix_i;
   HYPRE_Int *matrix_j;
   HYPRE_Complex *matrix_data;
   HYPRE_Int *local_matrix_i;
   HYPRE_Int *local_matrix_j;
   HYPRE_Complex *local_matrix_data;
   HYPRE_Int i, j;
   HYPRE_Int local_num_rows;
   HYPRE_Int local_num_nonzeros;
   HYPRE_Int num_nonzeros;
   HYPRE_Int num_data;
   HYPRE_Int num_requests;
   HYPRE_Int vec_len, offset;
   HYPRE_Int start_index;
   HYPRE_Int proc_id;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int num_types;
   HYPRE_Int *used_procs;

   hypre_MPI_Request *requests;
hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf=NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* determine procs that have vector data and store their ids in used_procs */ /* we need to do an exchange data for this. If I own row then I will contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /*build the response object*/ /*send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void**) &response_recv_buf, &response_recv_buf_starts); /* now processor 0 should have a list of ranges for processors that have rows - these are in send_proc_obj - it needs to create the new list of processors and also an array of vec starts - and send to those who own row*/ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); for (i=1; i<= num_types; i++) { used_procs[i-1] = send_info[i]; } for (i=num_types+1; i< count; i++) { new_vec_starts[i-num_types-1] = send_info[i] ; } } else /* clean up and exit */ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); 
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id ==0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i=0; i< num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i+1] = send_proc_obj.elements[i]+1; } hypre_qsort0(used_procs, 0, num_types-1); hypre_qsort0(new_vec_starts, 0, num_types); /*now we need to put into an array to send */ count = 2*num_types+2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i=1; i<= num_types; i++) { send_info[i] = (HYPRE_BigInt)used_procs[i-1]; } for (i=num_types+1; i< count; i++) { send_info[i] = new_vec_starts[i-num_types-1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first*/ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i=start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i-start]); } hypre_MPI_Waitall(num_types-start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST); num_requests = 4*num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* exchange contents of local_matrix_i - here we are sending to ourself also*/ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering?*/ offset = matrix_i[new_vec_starts[1]]; for (i=1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++) matrix_i[j+1] += offset; offset = matrix_i[new_vec_starts[i+1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, 
num_nonzeros);
   hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(matrix) = matrix_i;
   hypre_CSRMatrixInitialize(matrix);
   matrix_j = hypre_CSRMatrixJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);

   /* generate datatypes for further data exchange and exchange remaining
      data, i.e. column info and actual data */
   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]];
      num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index;
      hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }
   local_num_nonzeros = local_matrix_i[local_num_rows];
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag1, comm, &requests[j++]);
      hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
                      used_procs[i], tag3, comm, &requests[j++]);
   }
   hypre_MPI_Waitall(num_requests, requests, status);

   hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);

   if (hypre_CSRMatrixOwnsData(local_matrix))
      hypre_CSRMatrixDestroy(local_matrix);
   else
      hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);

   if (num_requests)
   {
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
   }

   return matrix;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCopy,
 * copies A to B,
 * if copy_data = 0, only the structure of A is copied to B
 * the routine does not check whether the dimensions of A and B are compatible
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
                        hypre_ParCSRMatrix *B,
                        HYPRE_Int copy_data )
{
   hypre_CSRMatrix *A_diag;
   hypre_CSRMatrix *A_offd;
   HYPRE_BigInt *col_map_offd_A;
   hypre_CSRMatrix *B_diag;
   hypre_CSRMatrix *B_offd;
   HYPRE_BigInt *col_map_offd_B;
   HYPRE_Int num_cols_offd_A;
   HYPRE_Int num_cols_offd_B;

   if (!A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!B)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   A_diag = hypre_ParCSRMatrixDiag(A);
   A_offd = hypre_ParCSRMatrixOffd(A);
   B_diag = hypre_ParCSRMatrixDiag(B);
   B_offd = hypre_ParCSRMatrixOffd(B);

   num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);

   hypre_assert(num_cols_offd_A == num_cols_offd_B);

   col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   hypre_CSRMatrixCopy(A_diag, B_diag, copy_data);
   hypre_CSRMatrixCopy(A_offd, B_offd, copy_data);

   /* should not happen if B has been initialized */
   if (num_cols_offd_B && col_map_offd_B == NULL)
   {
      col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
   }

   hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FillResponseParToCSRMatrix
 * Fill response function for determining the send processors
 * data exchange
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
                                  HYPRE_Int contact_size,
                                  HYPRE_Int contact_proc,
                                  void *ro,
                                  MPI_Comm comm,
                                  void **p_send_response_buf,
                                  HYPRE_Int
*response_message_size ) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2; hypre_MPI_Comm_rank(comm, &myid ); /*check to see if we need to allocate more space in send_proc_obj for ids*/ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length +=10; /*add space for 10 more processors*/ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /*initialize*/ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/ /*send proc*/ send_proc_obj->id[count] = contact_proc; /*do we need more storage for the elements?*/ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /*populate send_proc_obj*/ for (i=0; i< contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count+1] = index; send_proc_obj->length++; /*output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
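 * A minimal sketch (editor's illustration, not the hypre implementation) of
 * the sorted-union step that hypre_CSRMatrixUnion performs on the two offd
 * column maps; each global column index is emitted exactly once and the
 * return value is the union size, i.e. num_cols_offd of C:
 *
 *    static HYPRE_Int merge_union(const HYPRE_BigInt *a, HYPRE_Int na,
 *                                 const HYPRE_BigInt *b, HYPRE_Int nb,
 *                                 HYPRE_BigInt *out)
 *    {
 *       HYPRE_Int i = 0, j = 0, n = 0;
 *       while (i < na && j < nb)
 *       {
 *          if      (a[i] < b[j]) { out[n++] = a[i++]; }
 *          else if (a[i] > b[j]) { out[n++] = b[j++]; }
 *          else                  { out[n++] = a[i]; i++; j++; }
 *       }
 *       while (i < na) { out[n++] = a[i++]; }
 *       while (j < nb) { out[n++] = b[j++]; }
 *       return n;
 *    }
 *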
* A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in an offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B ) { hypre_ParCSRMatrix * C; HYPRE_BigInt * col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm( A ); hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A ); hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A ); hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A ); hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A ); hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B ) == hypre_ParCSRMatrixFirstRowIndex( A ) ); hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A ); hypre_ParCSRMatrixOwnsRowStarts( C ) = 0; hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A ); hypre_ParCSRMatrixOwnsColStarts( C ) = 0; for ( p=0; p<=num_procs; ++p ) hypre_assert( hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B) ); hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A ); hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A ); hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A ); hypre_ParCSRMatrixDiag( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0 ); hypre_ParCSRMatrixOffd( C ) = hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C ); hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C; hypre_ParCSRMatrixCommPkg( C ) = NULL; hypre_ParCSRMatrixCommPkgT( C ) = NULL; hypre_ParCSRMatrixOwnsData( C ) = 1; /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. I suspect, but don't know, that other parts of hypre do not assume that the correct values have been set.
hypre_ParCSRMatrixSetNumNonzeros( C ); hypre_ParCSRMatrixSetDNumNonzeros( C );*/ hypre_ParCSRMatrixNumNonzeros( C ) = 0; hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0; hypre_ParCSRMatrixRowindices( C ) = NULL; hypre_ParCSRMatrixRowvalues( C ) = NULL; hypre_ParCSRMatrixGetrowactive( C ) = 0; return C; } /* drop the entries that are not on the diagonal and smaller than * its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank*/ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v*v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i+1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag ++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i+1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* in normal cases: diagonal entry should not * appear in A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd ++; } } } A_diag_i_i = A_diag_i[i+1]; A_offd_i_i = A_offd_i[i+1]; A_diag_i[i+1] = nnz_diag; A_offd_i[i+1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if ( hypre_ParCSRMatrixCommPkg(A) 
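/* Editor's worked example for the drop rule above (illustration only):
 * take a local row with values {2.0, -1.0, 3.0} whose diagonal entry is
 * 2.0.
 *    type  1 (1-norm)    row_nrm = |2| + |-1| + |3| = 6
 *    type  2 (2-norm)    row_nrm = sqrt(4 + 1 + 9) ~= 3.742
 *    type -1 (inf-norm)  row_nrm = max(|2|, |-1|, |3|) = 3
 * With tol = 0.4 and the 2-norm, entries with |value| < 0.4 * 3.742 ~= 1.5
 * are removed, so -1.0 is dropped; the diagonal entry is always kept.
 * Afterwards col_map_offd is compacted and the matvec comm package is
 * recreated to match the compressed offd block. */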
) { hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) ); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Perform dual truncation of ParCSR matrix. * This code is adapted from original BoomerAMGInterpTruncate() * A: parCSR matrix to be modified * tol: relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero values after dropping), * this way, the application of the truncated matrix on a constant vector is the same as that of * the original matrix. * nrm_type: type of norm used for dropping with tol. * -- 0 = infinity-norm * -- 1 = 1-norm * -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix *A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global=0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. Cum_lost_per_thread * will temporarily store the cumulative number of dropped entries up to * each thread. 
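 * A short worked example of that bookkeeping (editor's illustration): with
 * 4 threads dropping {3, 0, 5, 2} entries over their row ranges,
 * cum_lost_per_thread becomes {3, 3, 8, 10}; thread 0 writes its kept
 * entries starting at offset 0, and each thread k > 0 starts writing at
 * A_diag_i[start] - cum_lost_per_thread[k-1], so the locally compressed
 * chunks land contiguously in the new arrays.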
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * cum_lost_per_thread; HYPRE_Int * num_lost_per_thread; HYPRE_Int * num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* Compute each thread's range of rows to truncate and compress. Note, * that i, j and data are all compressed as entries are dropped, but * that the compression only occurs locally over each thread's row * range. A_diag_i is only made globally consistent at the end of this * routine. During the dropping phases, A_diag_i[stop] will point to * the start of the next thread's row range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? 
fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i+1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v*v; } for (j = A_offd_i[i]; j < A_offd_i[i+1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v*v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i+1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i+1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < (A_diag_i[i+1]-num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i+1]-num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i=start; i<stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* Some rows exceed max_row_elmts, and require truncation. Essentially, * each thread truncates and compresses its range of rows locally. 
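 * Worked example (editor's illustration): for a row with entries
 * {4.0, -0.5, 2.5, 0.1, -3.0} and max_row_elmts = 3, hypre_qsort2_abs
 * orders the row by decreasing magnitude, {4.0, -3.0, 2.5, -0.5, 0.1};
 * the first three entries are kept, and with rescale on they are then
 * multiplied by row_sum/scale = 3.1/3.5 so the row sum is preserved.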
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* two temporary arrays to hold row i for temporary operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* Note A_diag_i[stop] is the starting point for the next thread * in j and data, not the stop point for this thread */ last_index = A_diag_i[i+1]; last_index_offd = A_offd_i[i+1]; if (i == stop-1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index-A_diag_i[i] + last_index_offd-A_offd_i[i]; if (max_row_elmts < num_elmts) { /* copy both diagonal and off-diag parts of row i to _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j]+num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt-cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j,A_aux_data,0,cnt-1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j]-num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag-A_diag_i[i]; num_lost_offd -= cnt_offd-A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum/scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* nothing dropped from this row, but still have to shift entries back * by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index-A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd-A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* Each thread has it's own locally compressed CSR matrix from rows start * to stop. 
Now, we have to copy each thread's chunk into the new * process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_diag_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size ; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i-1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* points to next open spot in new data structures for this thread */ if (my_thread_num == 0) { next_open = 0; } else { /* remember, cum_lost_per_thread[k] stores the num dropped up to and * including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num-1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* update A_offd_i with number of dropped entries by all lower ranked * threads */ if (my_thread_num > 0) { for (i=start; i<stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num-1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size ; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = 
A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
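/* Editor's addendum: a minimal, self-contained sequential sketch of the
 * tolerance-based phase of hypre_ParCSRMatrixTruncate above, for a plain
 * CSR triple (Ai, Aj, Ad). It drops |value| < tol * row_nrm (1-norm only,
 * for brevity), compresses Aj/Ad in place, and optionally rescales the
 * kept entries so each row sum is unchanged. Illustration only -- the
 * names and the single-norm choice are the editor's, not hypre's. */
#include <math.h>
#include <stdio.h>

static void csr_truncate_tol(int nrows, int *Ai, int *Aj, double *Ad,
                             double tol, int rescale)
{
   int i, j, next_open = 0, now_checking = 0;
   for (i = 0; i < nrows; i++)
   {
      double row_nrm = 0.0, row_sum = 0.0, scale = 0.0;
      int row_start = next_open;
      for (j = Ai[i]; j < Ai[i+1]; j++) { row_nrm += fabs(Ad[j]); }
      for (j = Ai[i]; j < Ai[i+1]; j++)
      {
         row_sum += Ad[now_checking];
         if (fabs(Ad[now_checking]) < tol * row_nrm)
         {
            now_checking++;                    /* drop this entry */
         }
         else
         {
            scale += Ad[now_checking];         /* keep and compact */
            Ad[next_open] = Ad[now_checking];
            Aj[next_open] = Aj[now_checking];
            now_checking++; next_open++;
         }
      }
      Ai[i] = row_start;                       /* fixed-up row pointer */
      if (rescale && scale != 0.0 && scale != row_sum)
      {
         for (j = row_start; j < next_open; j++) { Ad[j] *= row_sum/scale; }
      }
   }
   Ai[nrows] = next_open;                      /* new nnz count */
}

int main(void)
{
   int    Ai[3] = {0, 3, 5};
   int    Aj[5] = {0, 1, 2, 1, 2};
   double Ad[5] = {4.0, 0.1, -2.0, 1.0, 1.0};
   csr_truncate_tol(2, Ai, Aj, Ad, 0.1, 1);
   printf("nnz after truncation: %d\n", Ai[2]);  /* 0.1 dropped -> 4 */
   return 0;
}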
/****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* * In addition to the publicly accessible interface in HYPRE_mv.h, the * implementation in this file uses accessor macros into the sequential * matrix structure, and so includes the .h that defines that structure. * Should those accessor functions become proper functions at some later * date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *, HYPRE_Int, HYPRE_Int, void *, MPI_Comm, void **, HYPRE_Int *); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* * If create is called and row_starts and col_starts are NOT null, then it is * assumed that they are of length 2 containing the start row of the calling * processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCreate(MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt * row_starts, HYPRE_BigInt * col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* * row_starts[0] is start of local rows. row_starts[1] is start of next * processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1] - first_row_index; first_col_diag = col_starts[0]; local_num_cols = col_starts[1] - first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; //JSP: transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rows; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* * We could make these null instead of leaving the range. If that change * is made, then when this create is called from functions like the * matrix-matrix multiply, be careful not to generate a new partition.
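 * Worked example of the two-entry starts convention (editor's note): a
 * rank owning global rows 100..149 has row_starts = {100, 150}, so
 * first_row_index = 100 and local_num_rows = 150 - 100 = 50; the same
 * arithmetic on col_starts yields first_col_diag and local_num_cols.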
*/ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy(hypre_ParCSRMatrix * matrix) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if (hypre_ParCSRMatrixOwnsData(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if (hypre_ParCSRMatrixDiagT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if (hypre_ParCSRMatrixOffdT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if (hypre_ParCSRMatrixOwnsRowStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixOwnsColStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* * RL: this is actually not correct since the memory_location may * have been changed after allocation put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if (hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix)) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if (hypre_ParCSRMatrixProcOrdering(matrix)) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2(hypre_ParCSRMatrix * matrix, HYPRE_MemoryLocation memory_location) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } 
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix * A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A))); /* !!! S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix * hypre_ParCSRMatrixClone(hypre_ParCSRMatrix * A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix * A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if (hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location)) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core(hypre_ParCSRMatrix * matrix, const char *format) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/ hypre_CSRMatrixCheckSetNumNonzeros(diag); hypre_CSRMatrixCheckSetNumNonzeros(offd); if (format[0] == 'I') { HYPRE_BigInt total_num_nonzeros; HYPRE_BigInt local_num_nonzeros; local_num_nonzeros = (HYPRE_BigInt) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros; } else if (format[0] == 'D') { HYPRE_Real total_num_nonzeros; HYPRE_Real local_num_nonzeros; local_num_nonzeros = (HYPRE_Real) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd)); hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm); hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros; } else { hypre_error_in_arg(1); return hypre_error_flag; } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDNumNonzeros *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDNumNonzeros(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double"); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetNumRownnz *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetNumRownnz(hypre_ParCSRMatrix * matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(matrix); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_Int *rownnz_diag = hypre_CSRMatrixRownnz(diag); HYPRE_Int *rownnz_offd = hypre_CSRMatrixRownnz(offd); HYPRE_Int num_rownnz_diag = hypre_CSRMatrixNumRownnz(diag); HYPRE_Int num_rownnz_offd = hypre_CSRMatrixNumRownnz(offd); HYPRE_BigInt local_num_rownnz; HYPRE_BigInt global_num_rownnz; HYPRE_Int i, j; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } local_num_rownnz = i = j = 0; while (i < num_rownnz_diag && j < num_rownnz_offd) { local_num_rownnz++; if (rownnz_diag[i] < rownnz_offd[j]) { i++; } else { j++; } } local_num_rownnz += (HYPRE_BigInt) ((num_rownnz_diag - i) + (num_rownnz_offd - j)); hypre_MPI_Allreduce(&local_num_rownnz, &global_num_rownnz, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rownnz; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetDataOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_data) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsData(matrix) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetRowStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetRowStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_row_starts) { if (!matrix) { hypre_error_in_arg(1); return
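/* Editor's worked example for the two-pointer count in SetNumRownnz above
 * (illustration only): with rownnz_diag = {0, 2} and rownnz_offd = {1, 3}
 * the loop counts 3 comparisons and the tail term adds 1, giving 4, the
 * size of the union {0, 1, 2, 3}. Note that, as written, a row listed in
 * both arrays is counted twice, so the result is an upper bound on the
 * number of nonzero rows rather than an exact union size. */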
hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_col_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead(MPI_Comm comm, const char *file_name) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, col_e; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* * the bgl input file should only contain the EXACT range for local * processor */ hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i = 1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else { 
hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint(hypre_ParCSRMatrix * matrix, const char *file_name) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix), new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix), new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i = 0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ(const hypre_ParCSRMatrix * matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: 
can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0] + (HYPRE_BigInt) base_i; iupper = row_starts[1] + (HYPRE_BigInt) base_i - 1; jlower = col_starts[0] + (HYPRE_BigInt) base_j; jupper = col_starts[1] + (HYPRE_BigInt) base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt) (i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i + 1]; j++) { J = first_col_diag + (HYPRE_BigInt) (diag_j[j] + base_j); if (diag_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if (num_nonzeros_offd) { for (j = offd_i[i]; j < offd_i[i + 1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt) base_j; if (offd_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ(MPI_Comm comm, const char *filename, HYPRE_Int * base_i_ptr, HYPRE_Int * base_j_ptr, hypre_ParCSRMatrix ** matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int) row_starts[0]; base_j = (HYPRE_Int) 
col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag + (HYPRE_BigInt) num_cols - 1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int) (I - big_base_i - first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int) (J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt) offd_j[i]; hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i = 1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i = 0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i = 0; i < num_rows; i++) { i_col = diag_i[i]; for (j = i_col; j < diag_i[i + 1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. 
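 * Usage sketch (editor's example, error handling omitted):
 *
 *    HYPRE_BigInt rs, re, cs, ce;
 *    hypre_ParCSRMatrixGetLocalRange(A, &rs, &re, &cs, &ce);
 *    for (row = rs; row <= re; row++) { ... operate on local row ... }
 *
 * note the inclusive upper bounds: the loop runs through re itself.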
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange(hypre_ParCSRMatrix * matrix, HYPRE_BigInt * row_start, HYPRE_BigInt * row_end, HYPRE_BigInt * col_start, HYPRE_BigInt * col_end) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(matrix), &my_id); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return (-1); } /* * if buffer is not allocated and some information is requested, allocate * buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values)) { /* * allocate enough space to hold information from the longest row. 
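 * Editor's usage sketch for the GetRow/RestoreRow pair defined here (only
 * one row may be active at a time, and RestoreRow must be called before
 * the next GetRow):
 *
 *    HYPRE_Int      size;
 *    HYPRE_BigInt  *cols;
 *    HYPRE_Complex *vals;
 *    hypre_ParCSRMatrixGetRow(mat, row, &size, &cols, &vals);
 *    ... read size entries from cols/vals, sorted by global column ...
 *    hypre_ParCSRMatrixRestoreRow(mat, row, &size, &cols, &vals);
 *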
*/ HYPRE_Int max = 1, tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for (i = 0; i < m; i++) { tmp = hypre_CSRMatrixI(Aa)[i + 1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i + 1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int) (row - row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow + 1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &(hypre_CSRMatrixJ(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); vworkA = &(hypre_CSRMatrixData(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); nzB = hypre_CSRMatrixI(Ba)[lrow + 1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &(hypre_CSRMatrixJ(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); vworkB = &(hypre_CSRMatrixData(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* * Sort by increasing column numbers, assuming A and B * already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for (i = 0; i < nzA; i++) { v_p[imark + i] = vworkA[i]; } for (i = imark; i < nzB; i++) { v_p[nzA + i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for (i = 0; i < imark; i++) { idx_p[i] = cmap[cworkB[i]]; } } else { for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for (i = 0; i < nzA; i++) { idx_p[imark + i] = cstart + cworkA[i]; } for (i = imark; i < nzB; i++) { idx_p[nzA + i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(mat)); if (exec == HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow(hypre_ParCSRMatrix * matrix, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . 
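 * The broadcast header encodes which partitionings were supplied through
 * global_data[3] (editor's summary of the cases spelled out in the code
 * below):
 *    0 : only global_row_starts given; col starts taken to be identical
 *    1 : only global_row_starts given; global_col_starts = NULL
 *    2 : both global_row_starts and global_col_starts given
 *    3 : only global_col_starts given; global_row_starts = NULL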
* *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix(MPI_Comm comm, hypre_CSRMatrix * A, HYPRE_BigInt * global_row_starts, HYPRE_BigInt * global_col_starts) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2 * (num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* * contains code for what to expect, if 0: * global_row_starts = global_col_starts, only * global_row_starts given if 1: only global_row_starts * given, global_col_starts = NULL if 2: both * global_row_starts and global_col_starts given if 3: * only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2 * (num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i + num_procs + 5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if 
((global_data[3] == 0) || (global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); //Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); //Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i + 1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int) global_row_starts[i + 1]] - A_i[(HYPRE_Int) global_row_starts[i]]; } //num_nonzeros_proc[num_procs - 1] = A_i[(HYPRE_Int) global_num_rows] - A_i[(HYPRE_Int) row_starts[num_procs - 1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs - 1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs - 1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[i - 1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs - 1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 
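/* source rank 0, tag 0 */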
0, 0, comm, &status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix * A, hypre_ParCSRMatrix * matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /* * RL: XXX FIXME if A spans global column space, the following a_j should * be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows] - first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int) (last_col_diag - first_col_diag + 1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < (HYPRE_Int) first_col_diag || a_j[j] > (HYPRE_Int) last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int) (a_j[j] - first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i = 0; i < num_nonzeros; i++) { 
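/* the matrix has no off-diagonal columns: copy A verbatim into diag and
   leave offd empty */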
diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows + 1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix * par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows / num_threads; rest = num_rows - size * num_threads; for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } count = diag_i[ns] + offd_i[ns];; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j = diag_i[i]; j < diag_i[i + 1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt) diag_j[j] + first_col_diag; } for (j = offd_i[i]; j < offd_i[i + 1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix * par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; 
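/*
 * The gather below proceeds in three rounds: (1) every rank that owns
 * rows reports its last row index to rank 0 through
 * hypre_DataExchangeList; (2) rank 0 sorts the owners and sends each of
 * them the owner list plus the row offsets (tag1); (3) the owners then
 * exchange their local i arrays (tag2) and their data/j arrays
 * (tag1/tag3) with one another.
 */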
hypre_MPI_Status *status;
HYPRE_Int *new_vec_starts;
HYPRE_Int num_contacts;
HYPRE_Int contact_proc_list[1];
HYPRE_Int contact_send_buf[1];
HYPRE_Int contact_send_buf_starts[2];
HYPRE_Int max_response_size;
HYPRE_Int *response_recv_buf = NULL;
HYPRE_Int *response_recv_buf_starts = NULL;
hypre_DataExchangeResponse response_obj;
hypre_ProcListElements send_proc_obj;
HYPRE_Int *send_info = NULL;
hypre_MPI_Status status1;
HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
HYPRE_Int start;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
local_num_rows = (HYPRE_Int) (hypre_ParCSRMatrixLastRowIndex(par_matrix) -
                              hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1);
local_matrix = hypre_MergeDiagAndOffd(par_matrix);      /* creates matrix */
hypre_CSRMatrixBigJtoJ(local_matrix);   /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
/*
 * determine procs that have vector data and store their ids in
 * used_procs
 */
/*
 * We need a data exchange for this: if I own rows, I contact processor 0
 * with the endpoint of my local range.
 */
if (local_num_rows > 0) {
    num_contacts = 1;
    contact_proc_list[0] = 0;
    contact_send_buf[0] = (HYPRE_Int) hypre_ParCSRMatrixLastRowIndex(par_matrix);
    contact_send_buf_starts[0] = 0;
    contact_send_buf_starts[1] = 1;
} else {
    num_contacts = 0;
    contact_send_buf_starts[0] = 0;
    contact_send_buf_starts[1] = 0;
}
/* build the response object */
/* send_proc_obj will be for saving info from contacts */
send_proc_obj.length = 0;
send_proc_obj.storage_length = 10;
send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = 10;
send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
max_response_size = 0;          /* each response is null */
response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
response_obj.data1 = NULL;
response_obj.data2 = &send_proc_obj;    /* this is where we keep info
                                         * from contacts */
hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void **)&response_recv_buf, &response_recv_buf_starts);
/*
 * now processor 0 should have a list of ranges for processors that have
 * rows - these are in send_proc_obj - it needs to create the new list of
 * processors and also an array of vec starts - and send to those who own
 * rows
 */
if (my_id) {
    if (local_num_rows) {
        /* look for a message from processor 0 */
        hypre_MPI_Probe(0, tag1, comm, &status1);
        hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
        send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
        hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
        /* now unpack */
        num_types = send_info[0];
        used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
        new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
        for (i = 1; i <= num_types; i++) {
            used_procs[i - 1] = send_info[i];
        }
        for (i = num_types + 1; i < count; i++) {
            new_vec_starts[i - num_types - 1] = send_info[i];
        }
    } else {                    /* clean up and exit */
        hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
        hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
        hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
        if (response_recv_buf)
            hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
        if (response_recv_buf_starts)
            hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
        if (hypre_CSRMatrixOwnsData(local_matrix))
            hypre_CSRMatrixDestroy(local_matrix);
        else
            hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
        return NULL;
    }
} else {                        /* my_id == 0 */
    num_types = send_proc_obj.length;
    used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
    new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST);
    new_vec_starts[0] = 0;
    for (i = 0; i < num_types; i++) {
        used_procs[i] = send_proc_obj.id[i];
        new_vec_starts[i + 1] = (HYPRE_Int) send_proc_obj.elements[i] + 1;
    }
    hypre_qsort0(used_procs, 0, num_types - 1);
    hypre_qsort0(new_vec_starts, 0, num_types);
    /* now we need to put into an array to send */
    count = 2 * num_types + 2;
    send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
    send_info[0] = num_types;
    for (i = 1; i <= num_types; i++) {
        send_info[i] = used_procs[i - 1];
    }
    for (i = num_types + 1; i < count; i++) {
        send_info[i] = new_vec_starts[i - num_types - 1];
    }
    requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
    status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
    /* don't send to myself - these are sorted so my id would be first */
    start = 0;
    if (num_types && used_procs[0] == 0) {
        start = 1;
    }
    for (i = start; i < num_types; i++) {
        hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i - start]);
    }
    hypre_MPI_Waitall(num_types - start, requests, status);
    hypre_TFree(status, HYPRE_MEMORY_HOST);
    hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
/* clean up */
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_info, HYPRE_MEMORY_HOST);
if (response_recv_buf)
    hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if (response_recv_buf_starts)
    hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
/* now proc 0 can exit if it has no rows */
if (!local_num_rows) {
    if (hypre_CSRMatrixOwnsData(local_matrix))
        hypre_CSRMatrixDestroy(local_matrix);
    else
        hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
    hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
    hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
    return NULL;
}
/*
 * everyone left has rows and knows: new_vec_starts, num_types, and
 * used_procs
 */
/* this matrix should be rather small */
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST);
num_requests = 4 * num_types;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/*
 * exchange contents of local_matrix_i - here we are sending to ourself
 * also
 */
j = 0;
for (i = 0; i < num_types; i++) {
    proc_id = used_procs[i];
    vec_len = (HYPRE_Int) (new_vec_starts[i + 1] - new_vec_starts[i]);
    hypre_MPI_Irecv(&matrix_i[new_vec_starts[i] + 1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++) {
    proc_id = used_procs[i];
    hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from received data */
/* global numbering?
*/ offset = matrix_i[new_vec_starts[1]]; for (i = 1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i + 1]; j++) matrix_i[j + 1] += offset; offset = matrix_i[new_vec_starts[i + 1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* * generate datatypes for further data exchange and exchange remaining * data, i.e. column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int) new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int) new_vec_starts[i + 1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B, HYPRE_Int copy_data) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining 
the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int * response_message_size) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt *) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse *) ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements *) response_obj->data2; hypre_MPI_Comm_rank(comm, &myid); /* * check to see if we need to allocate more space in send_proc_obj for * ids */ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length += 10; /* add space for 10 more * processors */ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /* initialize */ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /* this is the number of * elements */ /* send proc */ send_proc_obj->id[count] = contact_proc; /* do we need more storage for the elements? */ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /* populate send_proc_obj */ for (i = 0; i < contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count + 1] = index; send_proc_obj->length++; /* output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
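 * A hypothetical sketch of intended use (values must be filled in by the
 * caller afterwards, since only the merged sparsity pattern is built):
 *
 *   hypre_ParCSRMatrix *C = hypre_ParCSRMatrixUnion(A, B);
 *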
* A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B) { hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrixGlobalNumRows(C) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixGlobalNumCols(C) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixFirstRowIndex(C) = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(hypre_ParCSRMatrixFirstRowIndex(B) == hypre_ParCSRMatrixFirstRowIndex(A)); hypre_ParCSRMatrixRowStarts(C) = hypre_ParCSRMatrixRowStarts(A); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixColStarts(C) = hypre_ParCSRMatrixColStarts(A); hypre_ParCSRMatrixOwnsColStarts(C) = 0; for (p = 0; p <= num_procs; ++p) hypre_assert(hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B)); hypre_ParCSRMatrixFirstColDiag(C) = hypre_ParCSRMatrixFirstColDiag(A); hypre_ParCSRMatrixLastRowIndex(C) = hypre_ParCSRMatrixLastRowIndex(A); hypre_ParCSRMatrixLastColDiag(C) = hypre_ParCSRMatrixLastColDiag(A); hypre_ParCSRMatrixDiag(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0); hypre_ParCSRMatrixOffd(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C); hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; hypre_ParCSRMatrixOwnsData(C) = 1; /* * SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. * I suspect, but don't know, that other parts of hypre do not assume * that the correct values have been set. 
* hypre_ParCSRMatrixSetNumNonzeros( C ); * hypre_ParCSRMatrixSetDNumNonzeros( C ); */ hypre_ParCSRMatrixNumNonzeros(C) = 0; hypre_ParCSRMatrixDNumNonzeros(C) = 0.0; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; return C; } /* * drop the entries that are not on the diagonal and smaller than its row * norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* * in normal cases: diagonal entry should not appear in * A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd++; } } } A_diag_i_i = A_diag_i[i + 1]; A_offd_i_i = A_offd_i[i + 1]; A_diag_i[i + 1] = nnz_diag; A_offd_i[i + 1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if 
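/* the offd pattern just changed, so any existing communication package
   is stale and must be rebuilt */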
(hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(A)); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* * Perform dual truncation of ParCSR matrix. This code is adapted from * original BoomerAMGInterpTruncate() A: parCSR matrix to be modified tol: * relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero * values after dropping), this way, the application of the truncated matrix * on a constant vector is the same as that of the original matrix. nrm_type: * type of norm used for dropping with tol. -- 0 = infinity-norm -- 1 = * 1-norm -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global = 0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* * Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. * Cum_lost_per_thread will temporarily store the cumulative number of * dropped entries up to each thread. */ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int *max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int *cum_lost_per_thread; HYPRE_Int *num_lost_per_thread; HYPRE_Int *num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* * Compute each thread's range of rows to truncate and compress. * Note, that i, j and data are all compressed as entries are * dropped, but that the compression only occurs locally over each * thread's row range. 
A_diag_i is only made globally consistent at * the end of this routine. During the dropping phases, * A_diag_i[stop] will point to the start of the next thread's row * range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v * v; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v * v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i + 1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i + 1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) 
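/* rescale the surviving entries so the truncated row keeps the original
   row sum */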
{ if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < (A_diag_i[i + 1] - num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i + 1] - num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* * Some rows exceed max_row_elmts, and require truncation. * Essentially, each thread truncates and compresses its range of * rows locally. */ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* * two temporary arrays to hold row i for temporary * operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this * thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (max_row_elmts < num_elmts) { /* * copy both diagonal and off-diag parts of row i to * _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j] + num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt - cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j, A_aux_data, 0, cnt - 1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j] - num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag - A_diag_i[i]; num_lost_offd -= cnt_offd - A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* * nothing dropped from this row, but still have to * shift entries back by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = 
A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index - A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd - A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* * Each thread has it's own locally compressed CSR matrix from * rows start to stop. Now, we have to copy each thread's chunk * into the new process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } /* * update A_diag_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, 
memory_location_offd); } /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } /* * update A_offd_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); hypre_CSRMatrixJ(A_offd) = A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
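/*
 * A minimal, hypothetical driver combining the two sparsification entry
 * points above: first drop entries below a relative 1-norm threshold,
 * then cap each row at a fixed number of largest-magnitude entries with
 * rescaling.  The tolerance and the cap are illustrative values, not
 * recommended defaults.
 */
static HYPRE_Int
example_sparsify(hypre_ParCSRMatrix *A)
{
    /* type 1: drop entries smaller than 1e-4 times the row 1-norm */
    hypre_ParCSRMatrixDropSmallEntries(A, 1e-4, 1);

    /* tol 0.0 skips the tolerance phase; keep at most 5 entries per row,
       rescale the survivors (1), measured in the 1-norm (nrm_type 1) */
    return hypre_ParCSRMatrixTruncate(A, 0.0, 5, 1, 1);
}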
/****************************************************************************** * * Member functions for hypre_ParCSRMatrix class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #include "../seq_mv/HYPRE_seq_mv.h" #include "../seq_mv/csr_matrix.h" /* * In addition to publically accessible interface in HYPRE_mv.h, the * implementation in this file uses accessor macros into the sequential * matrix structure, and so includes the .h that defines that structure. * Should those accessor functions become proper functions at some later * date, this will not be necessary. AJC 4/99 */ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *, HYPRE_Int, HYPRE_Int, void *, MPI_Comm, void **, HYPRE_Int *); /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCreate *--------------------------------------------------------------------------*/ /* * If create is called and row_starts and col_starts are NOT null, then it is * assumed that they are of length 2 containing the start row of the calling * processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParCSRMatrix * hypre_ParCSRMatrixCreate(MPI_Comm comm, HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols, HYPRE_BigInt * row_starts, HYPRE_BigInt * col_starts, HYPRE_Int num_cols_offd, HYPRE_Int num_nonzeros_diag, HYPRE_Int num_nonzeros_offd) { hypre_ParCSRMatrix *matrix; HYPRE_Int num_procs, my_id; HYPRE_Int local_num_rows, local_num_cols; HYPRE_BigInt first_row_index, first_col_diag; matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); if (!row_starts) { hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id, &row_starts); } if (!col_starts) { if (global_num_rows == global_num_cols) { col_starts = row_starts; } else { hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id, &col_starts); } } /* * row_starts[0] is start of local rows. row_starts[1] is start of next * processor's rows */ first_row_index = row_starts[0]; local_num_rows = row_starts[1] - first_row_index; first_col_diag = col_starts[0]; local_num_cols = col_starts[1] - first_col_diag; hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixDiag(matrix) = hypre_CSRMatrixCreate(local_num_rows, local_num_cols, num_nonzeros_diag); hypre_ParCSRMatrixOffd(matrix) = hypre_CSRMatrixCreate(local_num_rows, num_cols_offd, num_nonzeros_offd); hypre_ParCSRMatrixDiagT(matrix) = NULL; hypre_ParCSRMatrixOffdT(matrix) = NULL; //JSP:transposed matrices are optional hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rows; hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index; hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag; hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1; hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1; hypre_ParCSRMatrixColMapOffd(matrix) = NULL; hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL; hypre_ParCSRMatrixProcOrdering(matrix) = NULL; hypre_ParCSRMatrixAssumedPartition(matrix) = NULL; hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1; /* * We could make these null instead of leaving the range. If that change * is made, then when this create is called from functions like the * matrix-matrix multiply, be careful not to generate a new partition. 
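 *
 * As written, ownership follows the defaults set below: the matrix owns
 * whichever starts arrays reach this point, and col_starts ownership is
 * dropped when it aliases row_starts.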
*/ hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; hypre_ParCSRMatrixCommPkgT(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixRowindices(matrix) = NULL; hypre_ParCSRMatrixRowvalues(matrix) = NULL; hypre_ParCSRMatrixGetrowactive(matrix) = 0; matrix->bdiaginv = NULL; matrix->bdiaginv_comm_pkg = NULL; matrix->bdiag_size = -1; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_ParCSRMatrixSocDiagJ(matrix) = NULL; hypre_ParCSRMatrixSocOffdJ(matrix) = NULL; #endif return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixDestroy(hypre_ParCSRMatrix * matrix) { if (matrix) { HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(matrix); if (hypre_ParCSRMatrixOwnsData(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix)); if (hypre_ParCSRMatrixDiagT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix)); } if (hypre_ParCSRMatrixOffdT(matrix)) { hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix)); } if (hypre_ParCSRMatrixColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixDeviceColMapOffd(matrix)) { hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE); } if (hypre_ParCSRMatrixCommPkg(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix)); } if (hypre_ParCSRMatrixCommPkgT(matrix)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix)); } } if (hypre_ParCSRMatrixOwnsRowStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST); } if (hypre_ParCSRMatrixOwnsColStarts(matrix)) { hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST); } /* * RL: this is actually not correct since the memory_location may * have been changed after allocation put them in containers TODO */ hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), memory_location); if (hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix)) { hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix)); } if (hypre_ParCSRMatrixProcOrdering(matrix)) { hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST); } hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST); if (matrix->bdiaginv_comm_pkg) { hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg); } #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypre_TFree(hypre_ParCSRMatrixSocDiagJ(matrix), HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRMatrixSocOffdJ(matrix), HYPRE_MEMORY_DEVICE); #endif hypre_TFree(matrix, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixInitialize *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixInitialize_v2(hypre_ParCSRMatrix * matrix, HYPRE_MemoryLocation memory_location) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } 
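/* allocate diag and offd CSR storage at the requested memory location;
   the offd column map below is always host-resident */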
hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location); hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location); hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)), HYPRE_MEMORY_HOST); return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixInitialize(hypre_ParCSRMatrix * matrix) { return hypre_ParCSRMatrixInitialize_v2(matrix, hypre_ParCSRMatrixMemoryLocation(matrix)); } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixClone * Creates and returns a new copy S of the argument A * The following variables are not copied because they will be constructed * later if needed: CommPkg, CommPkgT, rowindices, rowvalues *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix * A, HYPRE_Int copy_data, HYPRE_MemoryLocation memory_location) { hypre_ParCSRMatrix *S; S = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)), hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A))); /* !!! S does not own Row/Col-Starts */ hypre_ParCSRMatrixSetRowStartsOwner(S, 0); hypre_ParCSRMatrixSetColStartsOwner(S, 0); hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A); hypre_ParCSRMatrixInitialize_v2(S, memory_location); hypre_ParCSRMatrixCopy(A, S, copy_data); return S; } hypre_ParCSRMatrix * hypre_ParCSRMatrixClone(hypre_ParCSRMatrix * A, HYPRE_Int copy_data) { return hypre_ParCSRMatrixClone_v2(A, copy_data, hypre_ParCSRMatrixMemoryLocation(A)); } HYPRE_Int hypre_ParCSRMatrixMigrate(hypre_ParCSRMatrix * A, HYPRE_MemoryLocation memory_location) { if (!A) { return hypre_error_flag; } HYPRE_MemoryLocation old_memory_location = hypre_ParCSRMatrixMemoryLocation(A); if (hypre_GetActualMemLocation(memory_location) != hypre_GetActualMemLocation(old_memory_location)) { hypre_CSRMatrix *A_diag = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixDiag(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(A)); hypre_ParCSRMatrixDiag(A) = A_diag; hypre_CSRMatrix *A_offd = hypre_CSRMatrixClone_v2(hypre_ParCSRMatrixOffd(A), 1, memory_location); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(A)); hypre_ParCSRMatrixOffd(A) = A_offd; hypre_TFree(hypre_ParCSRMatrixRowindices(A), old_memory_location); hypre_TFree(hypre_ParCSRMatrixRowvalues(A), old_memory_location); } else { hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(A)) = memory_location; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(A)) = memory_location; } return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixSetNumNonzeros_core(hypre_ParCSRMatrix * matrix, const char *format) { MPI_Comm comm; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); /* TODO in HYPRE_DEBUG ? 
*/
hypre_CSRMatrixCheckSetNumNonzeros(diag);
hypre_CSRMatrixCheckSetNumNonzeros(offd);
if (format[0] == 'I') {
    HYPRE_BigInt total_num_nonzeros;
    HYPRE_BigInt local_num_nonzeros;
    local_num_nonzeros = (HYPRE_BigInt) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd));
    hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
    hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
} else if (format[0] == 'D') {
    HYPRE_Real total_num_nonzeros;
    HYPRE_Real local_num_nonzeros;
    local_num_nonzeros = (HYPRE_Real) (hypre_CSRMatrixNumNonzeros(diag) + hypre_CSRMatrixNumNonzeros(offd));
    hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
    hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
} else {
    hypre_error_in_arg(1);
    return hypre_error_flag;
}
return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetNumNonzeros
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros(hypre_ParCSRMatrix * matrix)
{
    return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Int");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDNumNonzeros
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros(hypre_ParCSRMatrix * matrix)
{
    return hypre_ParCSRMatrixSetNumNonzeros_core(matrix, "Double");
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetNumRownnz
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetNumRownnz(hypre_ParCSRMatrix * matrix)
{
    MPI_Comm comm;
    hypre_CSRMatrix *diag;
    hypre_CSRMatrix *offd;
    HYPRE_Int *rownnz_diag;
    HYPRE_Int *rownnz_offd;
    HYPRE_Int num_rownnz_diag;
    HYPRE_Int num_rownnz_offd;
    HYPRE_BigInt local_num_rownnz;
    HYPRE_BigInt global_num_rownnz;
    HYPRE_Int i, j;
    if (!matrix) {
        hypre_error_in_arg(1);
        return hypre_error_flag;
    }
    comm = hypre_ParCSRMatrixComm(matrix);
    diag = hypre_ParCSRMatrixDiag(matrix);
    offd = hypre_ParCSRMatrixOffd(matrix);
    rownnz_diag = hypre_CSRMatrixRownnz(diag);
    rownnz_offd = hypre_CSRMatrixRownnz(offd);
    num_rownnz_diag = hypre_CSRMatrixNumRownnz(diag);
    num_rownnz_offd = hypre_CSRMatrixNumRownnz(offd);
    /*
     * merge the sorted rownnz lists of diag and offd; a row that appears
     * in both lists is counted only once
     */
    local_num_rownnz = i = j = 0;
    while (i < num_rownnz_diag && j < num_rownnz_offd) {
        local_num_rownnz++;
        if (rownnz_diag[i] < rownnz_offd[j]) {
            i++;
        } else if (rownnz_diag[i] > rownnz_offd[j]) {
            j++;
        } else {
            i++;
            j++;
        }
    }
    local_num_rownnz += (HYPRE_BigInt) ((num_rownnz_diag - i) + (num_rownnz_offd - j));
    hypre_MPI_Allreduce(&local_num_rownnz, &global_num_rownnz, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
    hypre_ParCSRMatrixGlobalNumRownnz(matrix) = global_num_rownnz;
    return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetDataOwner
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetDataOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_data)
{
    if (!matrix) {
        hypre_error_in_arg(1);
        return hypre_error_flag;
    }
    hypre_ParCSRMatrixOwnsData(matrix) = owns_data;
    return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixSetRowStartsOwner
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_row_starts)
{
    if (!matrix) {
        hypre_error_in_arg(1);
        return
hypre_error_flag; } hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixSetColStartsOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixSetColStartsOwner(hypre_ParCSRMatrix * matrix, HYPRE_Int owns_col_starts) { if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRead *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixRead(MPI_Comm comm, const char *file_name) { hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; HYPRE_BigInt global_num_rows, global_num_cols; HYPRE_Int num_cols_offd; HYPRE_Int local_num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_BigInt *col_map_offd; FILE *fp; HYPRE_Int equal = 1; HYPRE_BigInt row_s, row_e, col_s, col_e; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); fp = fopen(new_file_info, "r"); hypre_fscanf(fp, "%b", &global_num_rows); hypre_fscanf(fp, "%b", &global_num_cols); hypre_fscanf(fp, "%d", &num_cols_offd); /* * the bgl input file should only contain the EXACT range for local * processor */ hypre_fscanf(fp, "%b %b %b %b", &row_s, &row_e, &col_s, &col_e); row_starts[0] = row_s; row_starts[1] = row_e; col_starts[0] = col_s; col_starts[1] = col_e; col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_offd; i++) { hypre_fscanf(fp, "%b", &col_map_offd[i]); } fclose(fp); for (i = 1; i >= 0; i--) { if (row_starts[i] != col_starts[i]) { equal = 0; break; } } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } diag = hypre_CSRMatrixRead(new_file_d); local_num_rows = hypre_CSRMatrixNumRows(diag); if (num_cols_offd) { offd = hypre_CSRMatrixRead(new_file_o); } else { offd = hypre_CSRMatrixCreate(local_num_rows, 0, 0); hypre_CSRMatrixInitialize(offd); } matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(matrix) = comm; hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows; hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols; hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s; hypre_ParCSRMatrixFirstColDiag(matrix) = col_s; hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1; hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1; hypre_ParCSRMatrixRowStarts(matrix) = row_starts; hypre_ParCSRMatrixColStarts(matrix) = col_starts; hypre_ParCSRMatrixCommPkg(matrix) = NULL; /* set defaults */ hypre_ParCSRMatrixOwnsData(matrix) = 1; hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1; hypre_ParCSRMatrixOwnsColStarts(matrix) = 1; if (row_starts == col_starts) { hypre_ParCSRMatrixOwnsColStarts(matrix) = 0; } hypre_ParCSRMatrixDiag(matrix) = diag; hypre_ParCSRMatrixOffd(matrix) = offd; if (num_cols_offd) { hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd; } else { 
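/* empty offd block for this rank: no off-processor column map to attach */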
hypre_ParCSRMatrixColMapOffd(matrix) = NULL; } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrint(hypre_ParCSRMatrix * matrix, const char *file_name) { MPI_Comm comm; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt *col_map_offd; HYPRE_Int my_id, i, num_procs; char new_file_d[80], new_file_o[80], new_file_info[80]; FILE *fp; HYPRE_Int num_cols_offd = 0; HYPRE_BigInt row_s, row_e, col_s, col_e; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix); global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); if (hypre_ParCSRMatrixOffd(matrix)) num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_file_d, "%s.D.%d", file_name, my_id); hypre_sprintf(new_file_o, "%s.O.%d", file_name, my_id); hypre_sprintf(new_file_info, "%s.INFO.%d", file_name, my_id); hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix), new_file_d); if (num_cols_offd != 0) hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix), new_file_o); fp = fopen(new_file_info, "w"); hypre_fprintf(fp, "%b\n", global_num_rows); hypre_fprintf(fp, "%b\n", global_num_cols); hypre_fprintf(fp, "%d\n", num_cols_offd); row_s = hypre_ParCSRMatrixFirstRowIndex(matrix); row_e = hypre_ParCSRMatrixLastRowIndex(matrix); col_s = hypre_ParCSRMatrixFirstColDiag(matrix); col_e = hypre_ParCSRMatrixLastColDiag(matrix); /* add 1 to the ends because this is a starts partition */ hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1); for (i = 0; i < num_cols_offd; i++) hypre_fprintf(fp, "%b\n", col_map_offd[i]); fclose(fp); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixPrintIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixPrintIJ(const hypre_ParCSRMatrix * matrix, const HYPRE_Int base_i, const HYPRE_Int base_j, const char *filename) { MPI_Comm comm; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_Int num_rows; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_Int myid, num_procs, i, j; HYPRE_BigInt I, J; char new_filename[255]; FILE *file; HYPRE_Int num_nonzeros_offd; HYPRE_BigInt ilower, iupper, jlower, jupper; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } comm = hypre_ParCSRMatrixComm(matrix); first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); num_rows = hypre_ParCSRMatrixNumRows(matrix); row_starts = hypre_ParCSRMatrixRowStarts(matrix); col_starts = hypre_ParCSRMatrixColStarts(matrix); hypre_MPI_Comm_rank(comm, &myid); hypre_MPI_Comm_size(comm, &num_procs); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "w")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: 
can't open output file %s\n"); return hypre_error_flag; } num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); } ilower = row_starts[0] + (HYPRE_BigInt) base_i; iupper = row_starts[1] + (HYPRE_BigInt) base_i - 1; jlower = col_starts[0] + (HYPRE_BigInt) base_j; jupper = col_starts[1] + (HYPRE_BigInt) base_j - 1; hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper); for (i = 0; i < num_rows; i++) { I = first_row_index + (HYPRE_BigInt) (i + base_i); /* print diag columns */ for (j = diag_i[i]; j < diag_i[i + 1]; j++) { J = first_col_diag + (HYPRE_BigInt) (diag_j[j] + base_j); if (diag_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(diag_data[j]), hypre_cimag(diag_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } /* print offd columns */ if (num_nonzeros_offd) { for (j = offd_i[i]; j < offd_i[i + 1]; j++) { J = col_map_offd[offd_j[j]] + (HYPRE_BigInt) base_j; if (offd_data) { #ifdef HYPRE_COMPLEX hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J, hypre_creal(offd_data[j]), hypre_cimag(offd_data[j])); #else hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]); #endif } else hypre_fprintf(file, "%b %b\n", I, J); } } } fclose(file); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixReadIJ *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixReadIJ(MPI_Comm comm, const char *filename, HYPRE_Int * base_i_ptr, HYPRE_Int * base_j_ptr, hypre_ParCSRMatrix ** matrix_ptr) { HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_BigInt first_row_index; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; hypre_ParCSRMatrix *matrix; hypre_CSRMatrix *diag; hypre_CSRMatrix *offd; HYPRE_BigInt *col_map_offd; HYPRE_BigInt *row_starts; HYPRE_BigInt *col_starts; HYPRE_Int num_rows; HYPRE_BigInt big_base_i, big_base_j; HYPRE_Int base_i, base_j; HYPRE_Complex *diag_data; HYPRE_Int *diag_i; HYPRE_Int *diag_j; HYPRE_Complex *offd_data; HYPRE_Int *offd_i; HYPRE_Int *offd_j; HYPRE_BigInt *tmp_j; HYPRE_BigInt *aux_offd_j; HYPRE_BigInt I, J; HYPRE_Int myid, num_procs, i, i2, j; char new_filename[255]; FILE *file; HYPRE_Int num_cols_offd, num_nonzeros_diag, num_nonzeros_offd; HYPRE_Int equal, i_col, num_cols; HYPRE_Int diag_cnt, offd_cnt, row_cnt; HYPRE_Complex data; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &myid); hypre_sprintf(new_filename, "%s.%05d", filename, myid); if ((file = fopen(new_filename, "r")) == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error: can't open output file %s\n"); return hypre_error_flag; } hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols); hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd); hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd); row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs + 1, HYPRE_MEMORY_HOST); for (i = 0; i <= num_procs; i++) hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]); big_base_i = row_starts[0]; big_base_j = col_starts[0]; base_i = (HYPRE_Int) row_starts[0]; base_j = (HYPRE_Int) 
col_starts[0]; equal = 1; for (i = 0; i <= num_procs; i++) { row_starts[i] -= big_base_i; col_starts[i] -= big_base_j; if (row_starts[i] != col_starts[i]) equal = 0; } if (equal) { hypre_TFree(col_starts, HYPRE_MEMORY_HOST); col_starts = row_starts; } matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, num_cols_offd, num_nonzeros_diag, num_nonzeros_offd); hypre_ParCSRMatrixInitialize(matrix); diag = hypre_ParCSRMatrixDiag(matrix); offd = hypre_ParCSRMatrixOffd(matrix); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); offd_i = hypre_CSRMatrixI(offd); if (num_nonzeros_offd) { offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); } first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix); first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix); last_col_diag = first_col_diag + (HYPRE_BigInt) num_cols - 1; diag_cnt = 0; offd_cnt = 0; row_cnt = 0; for (i = 0; i < num_nonzeros_diag + num_nonzeros_offd; i++) { /* read values */ hypre_fscanf(file, "%b %b %le", &I, &J, &data); i2 = (HYPRE_Int) (I - big_base_i - first_row_index); J -= big_base_j; if (i2 > row_cnt) { diag_i[i2] = diag_cnt; offd_i[i2] = offd_cnt; row_cnt++; } if (J < first_col_diag || J > last_col_diag) { tmp_j[offd_cnt] = J; offd_data[offd_cnt++] = data; } else { diag_j[diag_cnt] = (HYPRE_Int) (J - first_col_diag); diag_data[diag_cnt++] = data; } } diag_i[num_rows] = diag_cnt; offd_i[num_rows] = offd_cnt; fclose(file); /* generate col_map_offd */ if (num_nonzeros_offd) { aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_nonzeros_offd; i++) aux_offd_j[i] = (HYPRE_BigInt) offd_j[i]; hypre_BigQsort0(aux_offd_j, 0, num_nonzeros_offd - 1); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); col_map_offd[0] = aux_offd_j[0]; offd_cnt = 0; for (i = 1; i < num_nonzeros_offd; i++) { if (aux_offd_j[i] > col_map_offd[offd_cnt]) col_map_offd[++offd_cnt] = aux_offd_j[i]; } for (i = 0; i < num_nonzeros_offd; i++) { offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd); } hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST); hypre_TFree(tmp_j, HYPRE_MEMORY_HOST); } /* move diagonal element in first position in each row */ for (i = 0; i < num_rows; i++) { i_col = diag_i[i]; for (j = i_col; j < diag_i[i + 1]; j++) { if (diag_j[j] == i) { diag_j[j] = diag_j[i_col]; data = diag_data[j]; diag_data[j] = diag_data[i_col]; diag_data[i_col] = data; diag_j[i_col] = i; break; } } } *base_i_ptr = base_i; *base_j_ptr = base_j; *matrix_ptr = matrix; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetLocalRange * returns the row numbers of the rows stored on this processor. * "End" is actually the row number of the last row on this processor. 
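 *
 * A minimal usage sketch (the variable names are illustrative only):
 *
 *    HYPRE_BigInt row_start, row_end, col_start, col_end;
 *    hypre_ParCSRMatrixGetLocalRange(A, &row_start, &row_end,
 *                                    &col_start, &col_end);
 *    for (r = row_start; r <= row_end; r++)
 *    {
 *       ... rows row_start through row_end (inclusive) live on this rank ...
 *    }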
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetLocalRange(hypre_ParCSRMatrix * matrix, HYPRE_BigInt * row_start, HYPRE_BigInt * row_end, HYPRE_BigInt * col_start, HYPRE_BigInt * col_end) { HYPRE_Int my_id; if (!matrix) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(matrix), &my_id); *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix); *row_end = hypre_ParCSRMatrixLastRowIndex(matrix); *col_start = hypre_ParCSRMatrixFirstColDiag(matrix); *col_end = hypre_ParCSRMatrixLastColDiag(matrix); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixGetRow * Returns global column indices and/or values for a given row in the global * matrix. Global row number is used, but the row must be stored locally or * an error is returned. This implementation copies from the two matrices that * store the local data, storing them in the hypre_ParCSRMatrix structure. * Only a single row can be accessed via this function at any one time; the * corresponding RestoreRow function must be called, to avoid bleeding memory, * and to be able to look at another row. * Either one of col_ind and values can be left null, and those values will * not be returned. * All indices are returned in 0-based indexing, no matter what is used under * the hood. EXCEPTION: currently this only works if the local CSR matrices * use 0-based indexing. * This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ * matrix code, adjusted for our data and software structures. * AJC 4/99. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixGetRowHost(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { HYPRE_Int my_id; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); } hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(mat), &my_id); hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1; if (row < row_start || row >= row_end) { return (-1); } /* * if buffer is not allocated and some information is requested, allocate * buffer */ if (!hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values)) { /* * allocate enough space to hold information from the longest row. 
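 * The scan below sizes the buffers to the widest local row,
 *    max_i [ (diag_i[i+1] - diag_i[i]) + (offd_i[i+1] - offd_i[i]) ],
 * so repeated GetRow calls on the same matrix never reallocate.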
*/ HYPRE_Int max = 1, tmp; HYPRE_Int i; HYPRE_Int m = row_end - row_start; for (i = 0; i < m; i++) { tmp = hypre_CSRMatrixI(Aa)[i + 1] - hypre_CSRMatrixI(Aa)[i] + hypre_CSRMatrixI(Ba)[i + 1] - hypre_CSRMatrixI(Ba)[i]; if (max < tmp) { max = tmp; } } hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc(HYPRE_Complex, max, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc(HYPRE_BigInt, max, hypre_ParCSRMatrixMemoryLocation(mat)); } /* Copy from dual sequential matrices into buffer */ { HYPRE_Complex *vworkA, *vworkB, *v_p; HYPRE_Int i, *cworkA, *cworkB; HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat); HYPRE_Int nztot, nzA, nzB, lrow = (HYPRE_Int) (row - row_start); HYPRE_BigInt *cmap, *idx_p; nzA = hypre_CSRMatrixI(Aa)[lrow + 1] - hypre_CSRMatrixI(Aa)[lrow]; cworkA = &(hypre_CSRMatrixJ(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); vworkA = &(hypre_CSRMatrixData(Aa)[hypre_CSRMatrixI(Aa)[lrow]]); nzB = hypre_CSRMatrixI(Ba)[lrow + 1] - hypre_CSRMatrixI(Ba)[lrow]; cworkB = &(hypre_CSRMatrixJ(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); vworkB = &(hypre_CSRMatrixData(Ba)[hypre_CSRMatrixI(Ba)[lrow]]); nztot = nzA + nzB; cmap = hypre_ParCSRMatrixColMapOffd(mat); if (values || col_ind) { if (nztot) { /* * Sort by increasing column numbers, assuming A and B * already sorted */ HYPRE_Int imark = -1; if (values) { *values = v_p = hypre_ParCSRMatrixRowvalues(mat); for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { v_p[i] = vworkB[i]; } else { break; } } imark = i; for (i = 0; i < nzA; i++) { v_p[imark + i] = vworkA[i]; } for (i = imark; i < nzB; i++) { v_p[nzA + i] = vworkB[i]; } } if (col_ind) { *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat); if (imark > -1) { for (i = 0; i < imark; i++) { idx_p[i] = cmap[cworkB[i]]; } } else { for (i = 0; i < nzB; i++) { if (cmap[cworkB[i]] < cstart) { idx_p[i] = cmap[cworkB[i]]; } else { break; } } imark = i; } for (i = 0; i < nzA; i++) { idx_p[imark + i] = cstart + cworkA[i]; } for (i = imark; i < nzB; i++) { idx_p[nzA + i] = cmap[cworkB[i]]; } } } else { if (col_ind) { *col_ind = 0; } if (values) { *values = 0; } } } *size = nztot; } /* End of copy */ return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixGetRow(hypre_ParCSRMatrix * mat, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(mat)); if (exec == HYPRE_EXEC_DEVICE) { return hypre_ParCSRMatrixGetRowDevice(mat, row, size, col_ind, values); } else #endif { return hypre_ParCSRMatrixGetRowHost(mat, row, size, col_ind, values); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixRestoreRow *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixRestoreRow(hypre_ParCSRMatrix * matrix, HYPRE_BigInt row, HYPRE_Int * size, HYPRE_BigInt ** col_ind, HYPRE_Complex ** values) { if (!hypre_ParCSRMatrixGetrowactive(matrix)) { hypre_error(HYPRE_ERROR_GENERIC); return hypre_error_flag; } hypre_ParCSRMatrixGetrowactive(matrix) = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_CSRMatrixToParCSRMatrix: * * Generates a ParCSRMatrix distributed across the processors in comm * from a CSRMatrix on proc 0 . 
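 *
 * A minimal calling sketch (A is only dereferenced on rank 0 below, so the
 * other ranks may pass NULL; NULL partitionings make rank 0 build an even
 * one with hypre_GeneratePartitioning):
 *
 *    hypre_ParCSRMatrix *par_A =
 *       hypre_CSRMatrixToParCSRMatrix(comm, my_id == 0 ? A : NULL,
 *                                     NULL, NULL);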
* *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_CSRMatrixToParCSRMatrix(MPI_Comm comm, hypre_CSRMatrix * A, HYPRE_BigInt * global_row_starts, HYPRE_BigInt * global_col_starts) { hypre_ParCSRMatrix *parcsr_A; HYPRE_BigInt *global_data; HYPRE_BigInt global_size; HYPRE_BigInt global_num_rows; HYPRE_BigInt global_num_cols; HYPRE_Int num_procs, my_id; HYPRE_Int *num_rows_proc; HYPRE_Int *num_nonzeros_proc; HYPRE_BigInt *row_starts = NULL; HYPRE_BigInt *col_starts = NULL; hypre_CSRMatrix *local_A; HYPRE_Complex *A_data; HYPRE_Int *A_i; HYPRE_Int *A_j; hypre_MPI_Request *requests; hypre_MPI_Status *status, status0; hypre_MPI_Datatype *csr_matrix_datatypes; HYPRE_Int free_global_row_starts = 0; HYPRE_Int free_global_col_starts = 0; HYPRE_Int total_size; HYPRE_BigInt first_col_diag; HYPRE_BigInt last_col_diag; HYPRE_Int num_rows; HYPRE_Int num_nonzeros; HYPRE_Int i, ind; hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); total_size = 4; if (my_id == 0) { total_size += 2 * (num_procs + 1); } global_data = hypre_CTAlloc(HYPRE_BigInt, total_size, HYPRE_MEMORY_HOST); if (my_id == 0) { global_size = 3; if (global_row_starts) { if (global_col_starts) { if (global_col_starts != global_row_starts) { /* * contains code for what to expect, if 0: * global_row_starts = global_col_starts, only * global_row_starts given if 1: only global_row_starts * given, global_col_starts = NULL if 2: both * global_row_starts and global_col_starts given if 3: * only global_col_starts given, global_row_starts = NULL */ global_data[3] = 2; global_size += (HYPRE_BigInt) (2 * (num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } for (i = 0; i < (num_procs + 1); i++) { global_data[i + num_procs + 5] = global_col_starts[i]; } } else { global_data[3] = 0; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { global_data[3] = 1; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_row_starts[i]; } } } else { if (global_col_starts) { global_data[3] = 3; global_size += (HYPRE_BigInt) ((num_procs + 1) + 1); for (i = 0; i < (num_procs + 1); i++) { global_data[i + 4] = global_col_starts[i]; } } } global_data[0] = (HYPRE_BigInt) hypre_CSRMatrixNumRows(A); global_data[1] = (HYPRE_BigInt) hypre_CSRMatrixNumCols(A); global_data[2] = global_size; A_data = hypre_CSRMatrixData(A); A_i = hypre_CSRMatrixI(A); A_j = hypre_CSRMatrixJ(A); } hypre_MPI_Bcast(global_data, 3, HYPRE_MPI_BIG_INT, 0, comm); global_num_rows = global_data[0]; global_num_cols = global_data[1]; global_size = global_data[2]; if (global_size > 3) { HYPRE_Int send_start; if (global_data[3] == 2) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 4 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5 + (num_procs + 1); hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } else if 
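/* cases 0 and 1: only row starts were packed; case 0 reuses them as column starts */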
((global_data[3] == 0) || (global_data[3] == 1)) { row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &row_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); if (global_data[3] == 0) { col_starts = row_starts; } } else { col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST); send_start = 4; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[0], 1, HYPRE_MPI_BIG_INT, 0, comm); send_start = 5; hypre_MPI_Scatter(&global_data[send_start], 1, HYPRE_MPI_BIG_INT, &col_starts[1], 1, HYPRE_MPI_BIG_INT, 0, comm); } } hypre_TFree(global_data, HYPRE_MEMORY_HOST); //Create ParCSR matrix parcsr_A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols, row_starts, col_starts, 0, 0, 0); //Allocate memory for building ParCSR matrix num_rows_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); num_nonzeros_proc = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { if (!global_row_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_row_starts); free_global_row_starts = 1; } if (!global_col_starts) { hypre_GeneratePartitioning(global_num_rows, num_procs, &global_col_starts); free_global_col_starts = 1; } for (i = 0; i < num_procs; i++) { num_rows_proc[i] = (HYPRE_Int) (global_row_starts[i + 1] - global_row_starts[i]); num_nonzeros_proc[i] = A_i[(HYPRE_Int) global_row_starts[i + 1]] - A_i[(HYPRE_Int) global_row_starts[i]]; } //num_nonzeros_proc[num_procs - 1] = A_i[(HYPRE_Int) global_num_rows] - A_i[(HYPRE_Int) row_starts[num_procs - 1]]; } hypre_MPI_Scatter(num_rows_proc, 1, HYPRE_MPI_INT, &num_rows, 1, HYPRE_MPI_INT, 0, comm); hypre_MPI_Scatter(num_nonzeros_proc, 1, HYPRE_MPI_INT, &num_nonzeros, 1, HYPRE_MPI_INT, 0, comm); /* RL: this is not correct: (HYPRE_Int) global_num_cols */ local_A = hypre_CSRMatrixCreate(num_rows, (HYPRE_Int) global_num_cols, num_nonzeros); csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST); if (my_id == 0) { requests = hypre_CTAlloc(hypre_MPI_Request, num_procs - 1, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_procs - 1, HYPRE_MEMORY_HOST); for (i = 1; i < num_procs; i++) { ind = A_i[(HYPRE_Int) global_row_starts[i]]; hypre_BuildCSRMatrixMPIDataType(num_nonzeros_proc[i], num_rows_proc[i], &A_data[ind], &A_i[(HYPRE_Int) global_row_starts[i]], &A_j[ind], &csr_matrix_datatypes[i]); hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm, &requests[i - 1]); hypre_MPI_Type_free(&csr_matrix_datatypes[i]); } hypre_CSRMatrixData(local_A) = A_data; hypre_CSRMatrixI(local_A) = A_i; hypre_CSRMatrixJ(local_A) = A_j; hypre_CSRMatrixOwnsData(local_A) = 0; hypre_MPI_Waitall(num_procs - 1, requests, status); hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(num_rows_proc, HYPRE_MEMORY_HOST); hypre_TFree(num_nonzeros_proc, HYPRE_MEMORY_HOST); if (free_global_row_starts) { hypre_TFree(global_row_starts, HYPRE_MEMORY_HOST); } if (free_global_col_starts) { hypre_TFree(global_col_starts, HYPRE_MEMORY_HOST); } } else { hypre_CSRMatrixInitialize(local_A); hypre_BuildCSRMatrixMPIDataType(num_nonzeros, num_rows, hypre_CSRMatrixData(local_A), hypre_CSRMatrixI(local_A), hypre_CSRMatrixJ(local_A), &csr_matrix_datatypes[0]); hypre_MPI_Recv(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[0], 
0, 0, comm, &status0); hypre_MPI_Type_free(csr_matrix_datatypes); } first_col_diag = hypre_ParCSRMatrixFirstColDiag(parcsr_A); last_col_diag = hypre_ParCSRMatrixLastColDiag(parcsr_A); GenerateDiagAndOffd(local_A, parcsr_A, first_col_diag, last_col_diag); /* set pointers back to NULL before destroying */ if (my_id == 0) { hypre_CSRMatrixData(local_A) = NULL; hypre_CSRMatrixI(local_A) = NULL; hypre_CSRMatrixJ(local_A) = NULL; } hypre_CSRMatrixDestroy(local_A); hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST); return parcsr_A; } /* RL: XXX this is not a scalable routine, see `marker' therein */ HYPRE_Int GenerateDiagAndOffd(hypre_CSRMatrix * A, hypre_ParCSRMatrix * matrix, HYPRE_BigInt first_col_diag, HYPRE_BigInt last_col_diag) { HYPRE_Int i, j; HYPRE_Int jo, jd; HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A); HYPRE_Complex *a_data = hypre_CSRMatrixData(A); HYPRE_Int *a_i = hypre_CSRMatrixI(A); /* * RL: XXX FIXME if A spans global column space, the following a_j should * be bigJ */ HYPRE_Int *a_j = hypre_CSRMatrixJ(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix); HYPRE_BigInt *col_map_offd; HYPRE_Complex *diag_data, *offd_data; HYPRE_Int *diag_i, *offd_i; HYPRE_Int *diag_j, *offd_j; HYPRE_Int *marker; HYPRE_Int num_cols_diag, num_cols_offd; HYPRE_Int first_elmt = a_i[0]; HYPRE_Int num_nonzeros = a_i[num_rows] - first_elmt; HYPRE_Int counter; num_cols_diag = (HYPRE_Int) (last_col_diag - first_col_diag + 1); num_cols_offd = 0; HYPRE_MemoryLocation memory_location = hypre_CSRMatrixMemoryLocation(A); if (num_cols - num_cols_diag) { hypre_CSRMatrixInitialize_v2(diag, 0, memory_location); diag_i = hypre_CSRMatrixI(diag); hypre_CSRMatrixInitialize_v2(offd, 0, memory_location); offd_i = hypre_CSRMatrixI(offd); marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols; i++) { marker[i] = 0; } jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { offd_i[i] = jo; diag_i[i] = jd; for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < first_col_diag || a_j[j] > last_col_diag) { if (!marker[a_j[j]]) { marker[a_j[j]] = 1; num_cols_offd++; } jo++; } else { jd++; } } } offd_i[num_rows] = jo; diag_i[num_rows] = jd; hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST); col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix); counter = 0; for (i = 0; i < num_cols; i++) { if (marker[i]) { col_map_offd[counter] = (HYPRE_BigInt) i; marker[i] = counter; counter++; } } hypre_CSRMatrixNumNonzeros(diag) = jd; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_j = hypre_CSRMatrixJ(diag); hypre_CSRMatrixNumNonzeros(offd) = jo; hypre_CSRMatrixNumCols(offd) = num_cols_offd; hypre_CSRMatrixInitialize(offd); offd_data = hypre_CSRMatrixData(offd); offd_j = hypre_CSRMatrixJ(offd); jo = 0; jd = 0; for (i = 0; i < num_rows; i++) { for (j = a_i[i] - first_elmt; j < a_i[i + 1] - first_elmt; j++) { if (a_j[j] < (HYPRE_Int) first_col_diag || a_j[j] > (HYPRE_Int) last_col_diag) { offd_data[jo] = a_data[j]; offd_j[jo++] = marker[a_j[j]]; } else { diag_data[jd] = a_data[j]; diag_j[jd++] = (HYPRE_Int) (a_j[j] - first_col_diag); } } } hypre_TFree(marker, HYPRE_MEMORY_HOST); } else { hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros; hypre_CSRMatrixInitialize(diag); diag_data = hypre_CSRMatrixData(diag); diag_i = hypre_CSRMatrixI(diag); diag_j = hypre_CSRMatrixJ(diag); for (i = 0; i < num_nonzeros; i++) { 
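/* no off-diagonal columns exist, so the whole local matrix is copied verbatim into the diag block */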
diag_data[i] = a_data[i]; diag_j[i] = a_j[i]; } offd_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); for (i = 0; i < num_rows + 1; i++) { diag_i[i] = a_i[i]; offd_i[i] = 0; } hypre_CSRMatrixNumCols(offd) = 0; hypre_CSRMatrixI(offd) = offd_i; } return hypre_error_flag; } hypre_CSRMatrix * hypre_MergeDiagAndOffd(hypre_ParCSRMatrix * par_matrix) { hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(par_matrix); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(par_matrix); hypre_CSRMatrix *matrix; HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix); HYPRE_Int num_rows = hypre_CSRMatrixNumRows(diag); HYPRE_Int *diag_i = hypre_CSRMatrixI(diag); HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag); HYPRE_Complex *diag_data = hypre_CSRMatrixData(diag); HYPRE_Int *offd_i = hypre_CSRMatrixI(offd); HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd); HYPRE_Complex *offd_data = hypre_CSRMatrixData(offd); HYPRE_Int *matrix_i; HYPRE_BigInt *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int num_nonzeros, i, j; HYPRE_Int count; HYPRE_Int size, rest, num_threads, ii; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(par_matrix); num_nonzeros = diag_i[num_rows] + offd_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = memory_location; hypre_CSRMatrixBigInitialize(matrix); matrix_i = hypre_CSRMatrixI(matrix); matrix_j = hypre_CSRMatrixBigJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); num_threads = hypre_NumThreads(); size = num_rows / num_threads; rest = num_rows - size * num_threads; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE #endif for (ii = 0; ii < num_threads; ii++) { HYPRE_Int ns, ne; if (ii < rest) { ns = ii * size + ii; ne = (ii + 1) * size + ii + 1; } else { ns = ii * size + rest; ne = (ii + 1) * size + rest; } count = diag_i[ns] + offd_i[ns];; for (i = ns; i < ne; i++) { matrix_i[i] = count; for (j = diag_i[i]; j < diag_i[i + 1]; j++) { matrix_data[count] = diag_data[j]; matrix_j[count++] = (HYPRE_BigInt) diag_j[j] + first_col_diag; } for (j = offd_i[i]; j < offd_i[i + 1]; j++) { matrix_data[count] = offd_data[j]; matrix_j[count++] = col_map_offd[offd_j[j]]; } } } /* end parallel region */ matrix_i[num_rows] = num_nonzeros; return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixToCSRMatrixAll: * generates a CSRMatrix from a ParCSRMatrix on all processors that have * parts of the ParCSRMatrix * Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1 *--------------------------------------------------------------------------*/ hypre_CSRMatrix * hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix * par_matrix) { MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix); hypre_CSRMatrix *matrix; hypre_CSRMatrix *local_matrix; HYPRE_Int num_rows = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(par_matrix); HYPRE_Int num_cols = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumCols(par_matrix); HYPRE_Int *matrix_i; HYPRE_Int *matrix_j; HYPRE_Complex *matrix_data; HYPRE_Int *local_matrix_i; HYPRE_Int *local_matrix_j; HYPRE_Complex *local_matrix_data; HYPRE_Int i, j; HYPRE_Int local_num_rows; HYPRE_Int local_num_nonzeros; HYPRE_Int num_nonzeros; HYPRE_Int num_data; HYPRE_Int num_requests; HYPRE_Int vec_len, offset; HYPRE_Int start_index; HYPRE_Int proc_id; 
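/* Gather strategy: (1) every rank owning rows reports the end of its row
 * range to rank 0 through hypre_DataExchangeList; (2) rank 0 sorts the
 * owners and sends each of them the used_procs / new_vec_starts tables
 * (tag1); (3) the owners exchange row pointers (tag2), then values (tag1)
 * and column indices (tag3) all-to-all. */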
HYPRE_Int num_procs, my_id; HYPRE_Int num_types; HYPRE_Int *used_procs; hypre_MPI_Request *requests; hypre_MPI_Status *status; HYPRE_Int *new_vec_starts; HYPRE_Int num_contacts; HYPRE_Int contact_proc_list[1]; HYPRE_Int contact_send_buf[1]; HYPRE_Int contact_send_buf_starts[2]; HYPRE_Int max_response_size; HYPRE_Int *response_recv_buf = NULL; HYPRE_Int *response_recv_buf_starts = NULL; hypre_DataExchangeResponse response_obj; hypre_ProcListElements send_proc_obj; HYPRE_Int *send_info = NULL; hypre_MPI_Status status1; HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334; HYPRE_Int start; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); local_num_rows = (HYPRE_Int) (hypre_ParCSRMatrixLastRowIndex(par_matrix) - hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1); local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */ hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */ local_matrix_i = hypre_CSRMatrixI(local_matrix); local_matrix_j = hypre_CSRMatrixJ(local_matrix); local_matrix_data = hypre_CSRMatrixData(local_matrix); /* * determine procs that have vector data and store their ids in * used_procs */ /* * we need to do an exchange data for this. If I own row then I will * contact processor 0 with the endpoint of my local range */ if (local_num_rows > 0) { num_contacts = 1; contact_proc_list[0] = 0; contact_send_buf[0] = (HYPRE_Int) hypre_ParCSRMatrixLastRowIndex(par_matrix); contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 1; } else { num_contacts = 0; contact_send_buf_starts[0] = 0; contact_send_buf_starts[1] = 0; } /* build the response object */ /* send_proc_obj will be for saving info from contacts */ send_proc_obj.length = 0; send_proc_obj.storage_length = 10; send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST); send_proc_obj.vec_starts[0] = 0; send_proc_obj.element_storage_length = 10; send_proc_obj.elements = hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST); max_response_size = 0; /* each response is null */ response_obj.fill_response = hypre_FillResponseParToCSRMatrix; response_obj.data1 = NULL; response_obj.data2 = &send_proc_obj; /* this is where we keep info * from contacts */ hypre_DataExchangeList(num_contacts, contact_proc_list, contact_send_buf, contact_send_buf_starts, sizeof(HYPRE_Int), sizeof(HYPRE_Int), &response_obj, max_response_size, 1, comm, (void **)&response_recv_buf, &response_recv_buf_starts); /* * now processor 0 should have a list of ranges for processors that have * rows - these are in send_proc_obj - it needs to create the new list of * processors and also an array of vec starts - and send to those who own * row */ if (my_id) { if (local_num_rows) { /* look for a message from processor 0 */ hypre_MPI_Probe(0, tag1, comm, &status1); hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count); send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1); /* now unpack */ num_types = send_info[0]; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST); for (i = 1; i <= num_types; i++) { used_procs[i - 1] = send_info[i]; } for (i = num_types + 1; i < count; i++) { new_vec_starts[i - num_types - 1] = send_info[i]; } } else /* clean up and exit */ /* * 
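 * (a rank that owns no rows has nothing more to receive: it frees its
 * contact buffers, destroys the merged local matrix, and returns NULL)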
*/ { hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); if (response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if (response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); return NULL; } } else /* my_id == 0 */ { num_types = send_proc_obj.length; used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST); new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types + 1, HYPRE_MEMORY_HOST); new_vec_starts[0] = 0; for (i = 0; i < num_types; i++) { used_procs[i] = send_proc_obj.id[i]; new_vec_starts[i + 1] = send_proc_obj.elements[i] + 1; } hypre_qsort0(used_procs, 0, num_types - 1); hypre_qsort0(new_vec_starts, 0, num_types); /* now we need to put into an array to send */ count = 2 * num_types + 2; send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST); send_info[0] = num_types; for (i = 1; i <= num_types; i++) { send_info[i] = (HYPRE_Int) used_procs[i - 1]; } for (i = num_types + 1; i < count; i++) { send_info[i] = new_vec_starts[i - num_types - 1]; } requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST); /* don't send to myself - these are sorted so my id would be first */ start = 0; if (num_types && used_procs[0] == 0) { start = 1; } for (i = start; i < num_types; i++) { hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1, comm, &requests[i - start]); } hypre_MPI_Waitall(num_types - start, requests, status); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(requests, HYPRE_MEMORY_HOST); } /* clean up */ hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST); hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST); hypre_TFree(send_info, HYPRE_MEMORY_HOST); if (response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST); if (response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST); /* now proc 0 can exit if it has no rows */ if (!local_num_rows) { if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); return NULL; } /* * everyone left has rows and knows: new_vec_starts, num_types, and * used_procs */ /* this matrix should be rather small */ matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1, HYPRE_MEMORY_HOST); num_requests = 4 * num_types; requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST); status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST); /* * exchange contents of local_matrix_i - here we are sending to ourself * also */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; vec_len = (HYPRE_Int) (new_vec_starts[i + 1] - new_vec_starts[i]); hypre_MPI_Irecv(&matrix_i[new_vec_starts[i] + 1], vec_len, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT, proc_id, tag2, comm, &requests[j++]); } hypre_MPI_Waitall(j, requests, status); /* generate matrix_i from received data */ /* global numbering? 
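 * each sender transmitted row pointers that restart at zero, so the offset
 * pass below shifts every received block by the running nonzero count;
 * e.g. received blocks [2,5] and [3,4] stitch into matrix_i = [0,2,5,8,9].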
*/ offset = matrix_i[new_vec_starts[1]]; for (i = 1; i < num_types; i++) { for (j = new_vec_starts[i]; j < new_vec_starts[i + 1]; j++) matrix_i[j + 1] += offset; offset = matrix_i[new_vec_starts[i + 1]]; } num_nonzeros = matrix_i[num_rows]; matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros); hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST; hypre_CSRMatrixI(matrix) = matrix_i; hypre_CSRMatrixInitialize(matrix); matrix_j = hypre_CSRMatrixJ(matrix); matrix_data = hypre_CSRMatrixData(matrix); /* * generate datatypes for further data exchange and exchange remaining * data, i.e. column info and actual data */ j = 0; for (i = 0; i < num_types; i++) { proc_id = used_procs[i]; start_index = matrix_i[(HYPRE_Int) new_vec_starts[i]]; num_data = matrix_i[(HYPRE_Int) new_vec_starts[i + 1]] - start_index; hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } local_num_nonzeros = local_matrix_i[local_num_rows]; for (i = 0; i < num_types; i++) { hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX, used_procs[i], tag1, comm, &requests[j++]); hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT, used_procs[i], tag3, comm, &requests[j++]); } hypre_MPI_Waitall(num_requests, requests, status); hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST); if (hypre_CSRMatrixOwnsData(local_matrix)) hypre_CSRMatrixDestroy(local_matrix); else hypre_TFree(local_matrix, HYPRE_MEMORY_HOST); if (num_requests) { hypre_TFree(requests, HYPRE_MEMORY_HOST); hypre_TFree(status, HYPRE_MEMORY_HOST); hypre_TFree(used_procs, HYPRE_MEMORY_HOST); } return matrix; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixCopy, * copies B to A, * if copy_data = 0, only the structure of A is copied to B * the routine does not check whether the dimensions of A and B are compatible *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixCopy(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B, HYPRE_Int copy_data) { hypre_CSRMatrix *A_diag; hypre_CSRMatrix *A_offd; HYPRE_BigInt *col_map_offd_A; hypre_CSRMatrix *B_diag; hypre_CSRMatrix *B_offd; HYPRE_BigInt *col_map_offd_B; HYPRE_Int num_cols_offd_A; HYPRE_Int num_cols_offd_B; if (!A) { hypre_error_in_arg(1); return hypre_error_flag; } if (!B) { hypre_error_in_arg(1); return hypre_error_flag; } A_diag = hypre_ParCSRMatrixDiag(A); A_offd = hypre_ParCSRMatrixOffd(A); B_diag = hypre_ParCSRMatrixDiag(B); B_offd = hypre_ParCSRMatrixOffd(B); num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); hypre_assert(num_cols_offd_A == num_cols_offd_B); col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B); hypre_CSRMatrixCopy(A_diag, B_diag, copy_data); hypre_CSRMatrixCopy(A_offd, B_offd, copy_data); /* should not happen if B has been initialized */ if (num_cols_offd_B && col_map_offd_B == NULL) { col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B; } hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); return hypre_error_flag; } /*-------------------------------------------------------------------- * hypre_FillResponseParToCSRMatrix * Fill response function for determining 
the send processors * data exchange *--------------------------------------------------------------------*/ HYPRE_Int hypre_FillResponseParToCSRMatrix(void *p_recv_contact_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void *ro, MPI_Comm comm, void **p_send_response_buf, HYPRE_Int * response_message_size) { HYPRE_Int myid; HYPRE_Int i, index, count, elength; HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt *) p_recv_contact_buf; hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse *) ro; hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements *) response_obj->data2; hypre_MPI_Comm_rank(comm, &myid); /* * check to see if we need to allocate more space in send_proc_obj for * ids */ if (send_proc_obj->length == send_proc_obj->storage_length) { send_proc_obj->storage_length += 10; /* add space for 10 more * processors */ send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int, send_proc_obj->storage_length, HYPRE_MEMORY_HOST); send_proc_obj->vec_starts = hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int, send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST); } /* initialize */ count = send_proc_obj->length; index = send_proc_obj->vec_starts[count]; /* this is the number of * elements */ /* send proc */ send_proc_obj->id[count] = contact_proc; /* do we need more storage for the elements? */ if (send_proc_obj->element_storage_length < index + contact_size) { elength = hypre_max(contact_size, 10); elength += index; send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST); send_proc_obj->element_storage_length = elength; } /* populate send_proc_obj */ for (i = 0; i < contact_size; i++) { send_proc_obj->elements[index++] = recv_contact_buf[i]; } send_proc_obj->vec_starts[count + 1] = index; send_proc_obj->length++; /* output - no message to return (confirmation) */ *response_message_size = 0; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixUnion * Creates and returns a new matrix whose elements are the union of A and B. * Data is not copied, only structural information is created. 
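 * A sketch of the intended call pattern (C borrows the row and column
 * partitions of A rather than copying them, so A must outlive C):
 *
 *    hypre_ParCSRMatrix *C = hypre_ParCSRMatrixUnion(A, B);
 *    ... use the combined sparsity pattern of C ...
 *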
* A and B must have the same communicator, numbers and distributions of rows * and columns (they can differ in which row-column pairs are nonzero, thus * in which columns are in a offd block) *--------------------------------------------------------------------------*/ hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion(hypre_ParCSRMatrix * A, hypre_ParCSRMatrix * B) { hypre_ParCSRMatrix *C; HYPRE_BigInt *col_map_offd_C = NULL; HYPRE_Int num_procs, my_id, p; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_MPI_Comm_rank(comm, &my_id); hypre_MPI_Comm_size(comm, &num_procs); C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixComm(C) = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrixGlobalNumRows(C) = hypre_ParCSRMatrixGlobalNumRows(A); hypre_ParCSRMatrixGlobalNumCols(C) = hypre_ParCSRMatrixGlobalNumCols(A); hypre_ParCSRMatrixFirstRowIndex(C) = hypre_ParCSRMatrixFirstRowIndex(A); hypre_assert(hypre_ParCSRMatrixFirstRowIndex(B) == hypre_ParCSRMatrixFirstRowIndex(A)); hypre_ParCSRMatrixRowStarts(C) = hypre_ParCSRMatrixRowStarts(A); hypre_ParCSRMatrixOwnsRowStarts(C) = 0; hypre_ParCSRMatrixColStarts(C) = hypre_ParCSRMatrixColStarts(A); hypre_ParCSRMatrixOwnsColStarts(C) = 0; for (p = 0; p <= num_procs; ++p) hypre_assert(hypre_ParCSRMatrixColStarts(A) == hypre_ParCSRMatrixColStarts(B)); hypre_ParCSRMatrixFirstColDiag(C) = hypre_ParCSRMatrixFirstColDiag(A); hypre_ParCSRMatrixLastRowIndex(C) = hypre_ParCSRMatrixLastRowIndex(A); hypre_ParCSRMatrixLastColDiag(C) = hypre_ParCSRMatrixLastColDiag(A); hypre_ParCSRMatrixDiag(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B), 0, 0, 0); hypre_ParCSRMatrixOffd(C) = hypre_CSRMatrixUnion(hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B), hypre_ParCSRMatrixColMapOffd(A), hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C); hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C; hypre_ParCSRMatrixCommPkg(C) = NULL; hypre_ParCSRMatrixCommPkgT(C) = NULL; hypre_ParCSRMatrixOwnsData(C) = 1; /* * SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce. * I suspect, but don't know, that other parts of hypre do not assume * that the correct values have been set. 
* hypre_ParCSRMatrixSetNumNonzeros( C ); * hypre_ParCSRMatrixSetDNumNonzeros( C ); */ hypre_ParCSRMatrixNumNonzeros(C) = 0; hypre_ParCSRMatrixDNumNonzeros(C) = 0.0; hypre_ParCSRMatrixRowindices(C) = NULL; hypre_ParCSRMatrixRowvalues(C) = NULL; hypre_ParCSRMatrixGetrowactive(C) = 0; return C; } /* * drop the entries that are not on the diagonal and smaller than its row * norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntries(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int type) { HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i; MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int *marker_offd = NULL; HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int my_id, num_procs; /* MPI size and rank */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (tol <= 0.0) { return hypre_error_flag; } marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0; for (i = 0; i < nrow_local; i++) { /* compute row norm */ HYPRE_Real row_nrm = 0.0; for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_a[j]; if (type == 1) { row_nrm += fabs(v); } else if (type == 2) { row_nrm += v * v; } else { row_nrm = hypre_max(row_nrm, fabs(v)); } } } if (type == 2) { row_nrm = sqrt(row_nrm); } /* drop small entries based on tol and row norm */ for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++) { HYPRE_Int col = A_diag_j[j]; HYPRE_Complex val = A_diag_a[j]; if (i == col || fabs(val) >= tol * row_nrm) { A_diag_j[nnz_diag] = col; A_diag_a[nnz_diag] = val; nnz_diag++; } } if (num_procs > 1) { for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++) { HYPRE_Int col = A_offd_j[j]; HYPRE_Complex val = A_offd_a[j]; /* * in normal cases: diagonal entry should not appear in * A_offd (but this can still be possible) */ if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm) { if (0 == marker_offd[col]) { marker_offd[col] = 1; } A_offd_j[nnz_offd] = col; A_offd_a[nnz_offd] = val; nnz_offd++; } } } A_diag_i_i = A_diag_i[i + 1]; A_offd_i_i = A_offd_i[i + 1]; A_diag_i[i + 1] = nnz_diag; A_offd_i[i + 1] = nnz_offd; } hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag; hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd; hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); for (i = 0, k = 0; i < num_cols_A_offd; i++) { if (marker_offd[i]) { col_map_offd_A[k] = col_map_offd_A[i]; marker_offd[i] = k++; } } /* num_cols_A_offd = k; */ hypre_CSRMatrixNumCols(A_offd) = k; for (i = 0; i < nnz_offd; i++) { A_offd_j[i] = marker_offd[A_offd_j[i]]; } if 
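/* offd columns were renumbered above, so any existing matvec communication package is stale and must be rebuilt */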
(hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(A)); } hypre_MatvecCommPkgCreate(A); hypre_TFree(marker_offd, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* * Perform dual truncation of ParCSR matrix. This code is adapted from * original BoomerAMGInterpTruncate() A: parCSR matrix to be modified tol: * relative tolerance or truncation factor for dropping small terms * max_row_elmts: maximum number of (largest) nonzero elements to keep. * rescale: Boolean on whether or not to scale resulting matrix. Scaling for * each row satisfies: sum(nonzero values before dropping)/ sum(nonzero * values after dropping), this way, the application of the truncated matrix * on a constant vector is the same as that of the original matrix. nrm_type: * type of norm used for dropping with tol. -- 0 = infinity-norm -- 1 = * 1-norm -- 2 = 2-norm */ HYPRE_Int hypre_ParCSRMatrixTruncate(hypre_ParCSRMatrix * A, HYPRE_Real tol, HYPRE_Int max_row_elmts, HYPRE_Int rescale, HYPRE_Int nrm_type) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] -= hypre_MPI_Wtime(); #endif hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_j_new; HYPRE_Real *A_diag_data_new; hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j_new; HYPRE_Real *A_offd_data_new; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int i, j, start_j; HYPRE_Int ierr = 0; HYPRE_Int next_open; HYPRE_Int now_checking; HYPRE_Int num_lost; HYPRE_Int num_lost_global = 0; HYPRE_Int next_open_offd; HYPRE_Int now_checking_offd; HYPRE_Int num_lost_offd; HYPRE_Int num_lost_global_offd; HYPRE_Int A_diag_size; HYPRE_Int A_offd_size; HYPRE_Int num_elmts; HYPRE_Int cnt, cnt_diag, cnt_offd; HYPRE_Real row_nrm; HYPRE_Real drop_coeff; HYPRE_Real row_sum; HYPRE_Real scale; HYPRE_MemoryLocation memory_location_diag = hypre_CSRMatrixMemoryLocation(A_diag); HYPRE_MemoryLocation memory_location_offd = hypre_CSRMatrixMemoryLocation(A_offd); /* * Threading variables. Entry i of num_lost_(offd_)per_thread holds the * number of dropped entries over thread i's row range. * Cum_lost_per_thread will temporarily store the cumulative number of * dropped entries up to each thread. 
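 * For example, if three threads drop {2, 0, 3} entries over their row
 * ranges, the surviving entries of threads 0, 1 and 2 must be slid left by
 * {0, 2, 2} positions when the per-thread ranges are stitched together.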
*/ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int *max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int *cum_lost_per_thread; HYPRE_Int *num_lost_per_thread; HYPRE_Int *num_lost_offd_per_thread; /* Initialize threading variables */ max_num_threads[0] = hypre_NumThreads(); cum_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); num_lost_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i = 0; i < max_num_threads[0]; i++) { num_lost_per_thread[i] = 0; num_lost_offd_per_thread[i] = 0; } #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,row_nrm, drop_coeff,j,start_j,row_sum,scale,num_lost,now_checking,next_open,num_lost_offd,now_checking_offd,next_open_offd,start,stop,cnt_diag,cnt_offd,num_elmts,cnt) #endif { my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); /* * Compute each thread's range of rows to truncate and compress. * Note, that i, j and data are all compressed as entries are * dropped, but that the compression only occurs locally over each * thread's row range. A_diag_i is only made globally consistent at * the end of this routine. During the dropping phases, * A_diag_i[stop] will point to the start of the next thread's row * range. */ /* my row range */ start = (n_fine / num_threads) * my_thread_num; if (my_thread_num == num_threads - 1) { stop = n_fine; } else { stop = (n_fine / num_threads) * (my_thread_num + 1); } /* * Truncate based on truncation tolerance */ if (tol > 0) { num_lost = 0; num_lost_offd = 0; next_open = A_diag_i[start]; now_checking = A_diag_i[start]; next_open_offd = A_offd_i[start];; now_checking_offd = A_offd_i[start];; for (i = start; i < stop; i++) { row_nrm = 0; /* compute norm for dropping small terms */ if (nrm_type == 0) { /* infty-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_diag_data[j])) ? fabs(A_diag_data[j]) : row_nrm; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm = (row_nrm < fabs(A_offd_data[j])) ? 
fabs(A_offd_data[j]) : row_nrm; } } if (nrm_type == 1) { /* 1-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { row_nrm += fabs(A_diag_data[j]); } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { row_nrm += fabs(A_offd_data[j]); } } if (nrm_type == 2) { /* 2-norm */ for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++) { HYPRE_Complex v = A_diag_data[j]; row_nrm += v * v; } for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++) { HYPRE_Complex v = A_offd_data[j]; row_nrm += v * v; } row_nrm = sqrt(row_nrm); } drop_coeff = tol * row_nrm; start_j = A_diag_i[i]; if (num_lost) { A_diag_i[i] -= num_lost; } row_sum = 0; scale = 0; for (j = start_j; j < A_diag_i[i + 1]; j++) { row_sum += A_diag_data[now_checking]; if (fabs(A_diag_data[now_checking]) < drop_coeff) { num_lost++; now_checking++; } else { scale += A_diag_data[now_checking]; A_diag_data[next_open] = A_diag_data[now_checking]; A_diag_j[next_open] = A_diag_j[now_checking]; now_checking++; next_open++; } } start_j = A_offd_i[i]; if (num_lost_offd) { A_offd_i[i] -= num_lost_offd; } for (j = start_j; j < A_offd_i[i + 1]; j++) { row_sum += A_offd_data[now_checking_offd]; if (fabs(A_offd_data[now_checking_offd]) < drop_coeff) { num_lost_offd++; now_checking_offd++; } else { scale += A_offd_data[now_checking_offd]; A_offd_data[next_open_offd] = A_offd_data[now_checking_offd]; A_offd_j[next_open_offd] = A_offd_j[now_checking_offd]; now_checking_offd++; next_open_offd++; } } /* scale row of A */ if (rescale && scale != 0.) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < (A_diag_i[i + 1] - num_lost); j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < (A_offd_i[i + 1] - num_lost_offd); j++) { A_offd_data[j] *= scale; } } } } /* end loop for (i = 0; i < n_fine; i++) */ /* store number of dropped elements and number of threads */ if (my_thread_num == 0) { max_num_threads[0] = num_threads; } num_lost_per_thread[my_thread_num] = num_lost; num_lost_offd_per_thread[my_thread_num] = num_lost_offd; } /* end if (trunc_factor > 0) */ /* * Truncate based on capping the nnz per row * */ if (max_row_elmts > 0) { HYPRE_Int A_mxnum, cnt1, last_index, last_index_offd; HYPRE_Int *A_aux_j; HYPRE_Real *A_aux_data; /* find maximum row length locally over this row range */ A_mxnum = 0; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } cnt1 = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (cnt1 > A_mxnum) { A_mxnum = cnt1; } } /* * Some rows exceed max_row_elmts, and require truncation. * Essentially, each thread truncates and compresses its range of * rows locally. 
*/ if (A_mxnum > max_row_elmts) { num_lost = 0; num_lost_offd = 0; /* * two temporary arrays to hold row i for temporary * operations */ A_aux_j = hypre_CTAlloc(HYPRE_Int, A_mxnum, HYPRE_MEMORY_HOST); A_aux_data = hypre_CTAlloc(HYPRE_Real, A_mxnum, HYPRE_MEMORY_HOST); cnt_diag = A_diag_i[start]; cnt_offd = A_offd_i[start]; for (i = start; i < stop; i++) { /* * Note A_diag_i[stop] is the starting point for the next * thread in j and data, not the stop point for this * thread */ last_index = A_diag_i[i + 1]; last_index_offd = A_offd_i[i + 1]; if (i == stop - 1) { last_index -= num_lost_per_thread[my_thread_num]; last_index_offd -= num_lost_offd_per_thread[my_thread_num]; } row_sum = 0; num_elmts = last_index - A_diag_i[i] + last_index_offd - A_offd_i[i]; if (max_row_elmts < num_elmts) { /* * copy both diagonal and off-diag parts of row i to * _aux_ arrays */ cnt = 0; for (j = A_diag_i[i]; j < last_index; j++) { A_aux_j[cnt] = A_diag_j[j]; A_aux_data[cnt++] = A_diag_data[j]; row_sum += A_diag_data[j]; } num_lost += cnt; cnt1 = cnt; for (j = A_offd_i[i]; j < last_index_offd; j++) { A_aux_j[cnt] = A_offd_j[j] + num_cols; A_aux_data[cnt++] = A_offd_data[j]; row_sum += A_offd_data[j]; } num_lost_offd += cnt - cnt1; /* sort data */ hypre_qsort2_abs(A_aux_j, A_aux_data, 0, cnt - 1); scale = 0; if (i > start) { A_diag_i[i] = cnt_diag; A_offd_i[i] = cnt_offd; } for (j = 0; j < max_row_elmts; j++) { scale += A_aux_data[j]; if (A_aux_j[j] < num_cols) { A_diag_j[cnt_diag] = A_aux_j[j]; A_diag_data[cnt_diag++] = A_aux_data[j]; } else { A_offd_j[cnt_offd] = A_aux_j[j] - num_cols; A_offd_data[cnt_offd++] = A_aux_data[j]; } } num_lost -= cnt_diag - A_diag_i[i]; num_lost_offd -= cnt_offd - A_offd_i[i]; /* scale row of A */ if (rescale && (scale != 0.)) { if (scale != row_sum) { scale = row_sum / scale; for (j = A_diag_i[i]; j < cnt_diag; j++) { A_diag_data[j] *= scale; } for (j = A_offd_i[i]; j < cnt_offd; j++) { A_offd_data[j] *= scale; } } } } /* end if (max_row_elmts < num_elmts) */ else { /* * nothing dropped from this row, but still have to * shift entries back by the number dropped so far */ if (A_diag_i[i] != cnt_diag) { start_j = A_diag_i[i]; A_diag_i[i] = cnt_diag; for (j = start_j; j < last_index; j++) { A_diag_j[cnt_diag] = A_diag_j[j]; A_diag_data[cnt_diag++] = A_diag_data[j]; } } else { cnt_diag += last_index - A_diag_i[i]; } if (A_offd_i[i] != cnt_offd) { start_j = A_offd_i[i]; A_offd_i[i] = cnt_offd; for (j = start_j; j < last_index_offd; j++) { A_offd_j[cnt_offd] = A_offd_j[j]; A_offd_data[cnt_offd++] = A_offd_data[j]; } } else { cnt_offd += last_index_offd - A_offd_i[i]; } } } /* end for (i = 0; i < n_fine; i++) */ num_lost_per_thread[my_thread_num] += num_lost; num_lost_offd_per_thread[my_thread_num] += num_lost_offd; hypre_TFree(A_aux_j, HYPRE_MEMORY_HOST); hypre_TFree(A_aux_data, HYPRE_MEMORY_HOST); } /* end if (A_mxnum > max_row_elmts) */ } /* end if (max_row_elmts > 0) */ /* Sum up num_lost_global */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { num_lost_global = 0; num_lost_global_offd = 0; for (i = 0; i < max_num_threads[0]; i++) { num_lost_global += num_lost_per_thread[i]; num_lost_global_offd += num_lost_offd_per_thread[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * Synchronize and create new diag data structures */ if (num_lost_global) { /* * Each thread has it's own locally compressed CSR matrix from * rows start to stop. 
Now, we have to copy each thread's chunk * into the new process-wide CSR data structures * * First, we compute the new process-wide number of nonzeros (i.e., * A_diag_size), and compute cum_lost_per_thread[k] so that this * entry holds the cumulative sum of entries dropped up to and * including thread k. */ if (my_thread_num == 0) { A_diag_size = A_diag_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_diag_size -= num_lost_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_per_thread[i]; } } A_diag_j_new = hypre_CTAlloc(HYPRE_Int, A_diag_size, memory_location_diag); A_diag_data_new = hypre_CTAlloc(HYPRE_Real, A_diag_size, memory_location_diag); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_diag_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_diag_i[start]; i < A_diag_i[stop] - num_lost_per_thread[my_thread_num]; i++) { A_diag_j_new[next_open] = A_diag_j[i]; A_diag_data_new[next_open] = A_diag_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * update A_diag_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_diag_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_diag_i[n_fine] = A_diag_size; hypre_TFree(A_diag_j, memory_location_diag); hypre_TFree(A_diag_data, memory_location_diag); hypre_CSRMatrixJ(A_diag) = A_diag_j_new; hypre_CSRMatrixData(A_diag) = A_diag_data_new; hypre_CSRMatrixNumNonzeros(A_diag) = A_diag_size; } } /* * Synchronize and create new offd data structures */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (num_lost_global_offd) { /* Repeat process for off-diagonal */ if (my_thread_num == 0) { A_offd_size = A_offd_i[n_fine]; for (i = 0; i < max_num_threads[0]; i++) { A_offd_size -= num_lost_offd_per_thread[i]; if (i > 0) { cum_lost_per_thread[i] = num_lost_offd_per_thread[i] + cum_lost_per_thread[i - 1]; } else { cum_lost_per_thread[i] = num_lost_offd_per_thread[i]; } } A_offd_j_new = hypre_CTAlloc(HYPRE_Int, A_offd_size, memory_location_offd); A_offd_data_new = hypre_CTAlloc(HYPRE_Real, A_offd_size, memory_location_offd); } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * points to next open spot in new data structures for this * thread */ if (my_thread_num == 0) { next_open = 0; } else { /* * remember, cum_lost_per_thread[k] stores the num dropped up * to and including thread k */ next_open = A_offd_i[start] - cum_lost_per_thread[my_thread_num - 1]; } /* copy the j and data arrays over */ for (i = A_offd_i[start]; i < A_offd_i[stop] - num_lost_offd_per_thread[my_thread_num]; i++) { A_offd_j_new[next_open] = A_offd_j[i]; A_offd_data_new[next_open] = A_offd_data[i]; next_open += 1; } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif /* * update A_offd_i with number of dropped entries by all lower * ranked threads */ if (my_thread_num > 0) { for (i = start; i < stop; i++) { A_offd_i[i] -= cum_lost_per_thread[my_thread_num - 1]; } } if (my_thread_num == 0) { /* Set last entry */ A_offd_i[n_fine] = A_offd_size; hypre_TFree(A_offd_j, memory_location_offd); hypre_TFree(A_offd_data, memory_location_offd); 
hypre_CSRMatrixJ(A_offd) = A_offd_j_new; hypre_CSRMatrixData(A_offd) = A_offd_data_new; hypre_CSRMatrixNumNonzeros(A_offd) = A_offd_size; } } } /* end parallel region */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(cum_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_per_thread, HYPRE_MEMORY_HOST); hypre_TFree(num_lost_offd_per_thread, HYPRE_MEMORY_HOST); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_INTERP_TRUNC] += hypre_MPI_Wtime(); #endif return ierr; }
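For reference, a minimal usage sketch of the routine above (a hypothetical driver, not part of hypre; the internal header name _hypre_parcsr_mv.h is an assumption): drop entries below 10% of each row's infinity-norm, cap rows at 5 nonzeros, and rescale so the truncated matrix still maps a constant vector the way the original does.

#include "_hypre_parcsr_mv.h" /* assumption: hypre internal ParCSR header */

/* Hypothetical helper: truncate an assembled ParCSR matrix in place.
 * Arguments follow the signature of hypre_ParCSRMatrixTruncate above. */
void truncate_example(hypre_ParCSRMatrix *A)
{
    HYPRE_Real tol           = 0.1; /* relative truncation factor */
    HYPRE_Int  max_row_elmts = 5;   /* keep at most 5 (largest) entries per row */
    HYPRE_Int  rescale       = 1;   /* preserve row sums after dropping */
    HYPRE_Int  nrm_type      = 0;   /* 0 = infinity-norm, 1 = 1-norm, 2 = 2-norm */

    hypre_ParCSRMatrixTruncate(A, tol, max_row_elmts, rescale, nrm_type);
}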
jacobi.c
#include <stdio.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif // Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6*t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve paralleism. * All do loops are parallized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 500 int n,m,mits; double tol,relax=1.0,alpha=0.0543; double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE]; double dx,dy; int main (void) { float toler; /* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); scanf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; printf("Input mits - Maximum iterations for solver\n"); scanf("%d",&mits); */ n=MSIZE; m=MSIZE; tol=0.0000000001; mits=5000; #ifdef _OPENMP #pragma omp parallel { #pragma omp single printf("Running using %d threads...\n",omp_get_num_threads()); } #endif driver ( ) ; return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialized. 
* * Working variables/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver( ) { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi (); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n",time2-time1); /* error_check (n,m,alpha,dx,dy,u,f)*/ error_check ( ); } /* subroutine initialize (n,m,alpha,dx,dy,u,f) ****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize( ) { int i,j, xx,yy; //double PI=3.1415926; dx = 2.0 / (n-1); dy = 2.0 / (m-1); /* Initialize initial condition and RHS */ #pragma omp parallel for private(xx,yy,j,i) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx =(int)( -1.0 + dx * (i-1)); yy = (int)(-1.0 + dy * (j-1)) ; u[i][j] = 0.0; f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\ - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy); } } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution *****************************************************************/ void jacobi( ) { double omega; int i,j,k; double error,resid,ax,ay,b; // double error_local; // float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2; // float te1,te2; // float second; omega=relax; /* * Initialize coefficients */ ax = 1.0/(dx*dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y-direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while ((k<=mits)&&(error>tol)) { error = 0.0; /* Copy new solution into old */ #pragma omp parallel { #pragma omp for private(j,i) for(i=0;i<n;i++) for(j=0;j<m;j++) uold[i][j] = u[i][j]; #pragma omp for private(resid,j,i) reduction(+:error) nowait for (i=1;i<(n-1);i++) for (j=1;j<(m-1);j++) { resid = (ax*(uold[i-1][j] + uold[i+1][j])\ + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b; u[i][j] = uold[i][j] - omega * resid; error = error + resid*resid ; } } /* omp end parallel */ /* Error check */ k = k + 1; if (k%500==0) printf("Finished %d iteration.\n",k); error = sqrt(error)/(n*m); } /* End iteration loop */ printf("Total Number of Iterations:%d\n",k); printf("Residual:%E\n", error); } /* subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none ************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check ( ) { int i,j; double xx,yy,temp,error; dx = 2.0 / (n-1); dy = 2.0 / (m-1); error = 0.0 ; #pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i=0;i<n;i++) for (j=0;j<m;j++) { xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy); error = error + temp*temp; } error = sqrt(error)/(n*m); printf("Solution Error :%E \n",error); }
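The core of jacobi() above is a single parallel region that pairs a copy loop with an update loop, accumulating the squared residual through a reduction. A stripped-down, self-contained sketch of that pattern (the array size N and the coefficient arguments are illustrative, not from the file):

#include <math.h>
#define N 64
static double u[N][N], uold[N][N], f[N][N];

/* One Jacobi sweep; returns the normalized residual. */
double jacobi_sweep(double ax, double ay, double b, double omega)
{
    int i, j;
    double resid, error = 0.0;
#pragma omp parallel private(i, j, resid)
    {
#pragma omp for
        for (i = 0; i < N; i++)            /* copy: u -> uold */
            for (j = 0; j < N; j++)
                uold[i][j] = u[i][j];
        /* the implicit barrier after the copy loop keeps the update
         * from reading a half-copied uold */
#pragma omp for reduction(+:error) nowait
        for (i = 1; i < N - 1; i++)
            for (j = 1; j < N - 1; j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                       + ay * (uold[i][j - 1] + uold[i][j + 1])
                       + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error += resid * resid;    /* nowait is safe: the region's
                                              closing barrier completes the
                                              reduction */
            }
    }
    return sqrt(error) / (N * N);
}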
#include <stdio.h> #include <math.h> //Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6 * t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve paralleism. * All do loops are parallized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 500 int n, m, mits; double tol, relax = 1.0, alpha = 0.0543; double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE]; double dx, dy; int main(void) { float toler; /* * printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); * canf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance * for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; * intf("Input mits - Maximum iterations for solver\n"); * s); */ n = MSIZE; m = MSIZE; tol = 0.0000000001; mits = 5000; driver(); return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialized. * * Working variables/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi(); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n", time2 - time1); /* error_check (n,m,alpha,dx,dy,u,f) */ error_check(); } /* * subroutine initialize (n,m,alpha,dx,dy,u,f) ***************************************************** * * Initializes data Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * **************************************************** */ void initialize() { int i, j, xx, yy; //double PI = 3.1415926; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = (int)(-1.0 + dx * (i - 1)); yy = (int)(-1.0 + dy * (j - 1)); u[i][j] = 0.0; f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy) \ -2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy); } } /* * subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ***************************************************************** * * Subroutine HelmholtzJ Solves poisson equation on rectangular grid assuming : * 1) Uniform discretization in each direction, and (2) Dirichlect boundary * conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions dx,dy Grid * spacing in the X/Y directions alpha Helmholtz eqn. 
coefficient omega * Relaxation factor f(n,m) Right hand side function u(n,m) Dependent * variable/Solution tol Tolerance for iterative solver maxit Maximum * number of iterations * * Output : u(n,m) - Solution *************************************************************** */ void jacobi() { double omega; int i, j, k; double error, resid, ax, ay, b; //double error_local; //float ta, tb, tc, td, te, ta1, ta2, tb1, tb2, tc1, tc2, td1, td2; //float te1, te2; //float second; omega = relax; /* * Initialize coefficients */ ax = 1.0 / (dx * dx); /* X-direction coef */ ay = 1.0 / (dy * dy); /* Y-direction coef */ b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while ((k <= mits) && (error > tol)) { error = 0.0; /* Copy new solution into old */ for (i = 0; i < n; i++) for (j = 0; j < m; j++) uold[i][j] = u[i][j]; for (i = 1; i < (n - 1); i++) for (j = 1; j < (m - 1); j++) { resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) \ +ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b; u[i][j] = uold[i][j] - omega * resid; error = error + resid * resid; } /* omp end parallel */ /* Error check */ k = k + 1; if (k % 500 == 0) printf("Finished %d iteration.\n", k); error = sqrt(error) / (n * m); } /* End iteration loop */ printf("Total Number of Iterations:%d\n", k); printf("Residual:%E\n", error); } /* * subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none *********************************************************** * * Checks error between numerical and exact solution * ********************************************************** */ void error_check() { int i, j; double xx, yy, temp, error; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); error = 0.0; for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = -1.0 + dx * (i - 1); yy = -1.0 + dy * (j - 1); temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy); error = error + temp * temp; } error = sqrt(error) / (n * m); printf("Solution Error :%E \n", error); }
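In the serial variant just as in the OpenMP one, the update divides the five-point Helmholtz residual by the central coefficient b; isolating it (a sketch with hypothetical naming) makes the fixed-point property explicit:

/* Hypothetical helper: the 5-point Helmholtz residual that jacobi()
 * divides by b. When ax*(uW+uE) + ay*(uS+uN) + b*uC == fC the residual
 * is zero and the Jacobi update u = uold - omega*resid leaves u fixed. */
double residual_at(double uC, double uW, double uE, double uS, double uN,
                   double fC, double ax, double ay, double b)
{
    return (ax * (uW + uE) + ay * (uS + uN) + b * uC - fC) / b;
}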
#include <stdio.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif /* */ //Add timing support #include <sys/time.h> double time_stamp() { struct timeval t; double time; gettimeofday(&t, NULL); time = t.tv_sec + 1.0e-6 * t.tv_usec; return time; } double time1, time2; void driver(void); void initialize(void); void jacobi(void); void error_check(void); /************************************************************ * program to solve a finite difference * discretization of Helmholtz equation : * (d2/dx2)u + (d2/dy2)u - alpha u = f * using Jacobi iterative method. * * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 * This c version program is translated by * Chunhua Liao, University of Houston, Jan, 2005 * * Directives are used in this code to achieve paralleism. * All do loops are parallized with default 'static' scheduling. * * Input : n - grid dimension in x direction * m - grid dimension in y direction * alpha - Helmholtz constant (always greater than 0.0) * tol - error tolerance for iterative solver * relax - Successice over relaxation parameter * mits - Maximum iterations for iterative solver * * On output * : u(n,m) - Dependent variable (solutions) * : f(n,m) - Right hand side function *************************************************************/ #define MSIZE 500 int n, m, mits; double tol, relax = 1.0, alpha = 0.0543; double u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE]; double dx, dy; int main(void) { float toler; /* * printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE); * canf ("%d",&n); scanf ("%d",&m); printf("Input tol - error tolerance * for iterative solver\n"); scanf("%f",&toler); tol=(double)toler; * intf("Input mits - Maximum iterations for solver\n"); * s); */ n = MSIZE; m = MSIZE; tol = 0.0000000001; mits = 5000; #ifdef _OPENMP #pragma omp parallel { #pragma omp single printf("Running using %d threads...\n", omp_get_num_threads()); } #endif /* */ driver(); return 0; } /************************************************************* * Subroutine driver () * This is where the arrays are allocated and initialized. 
* * Working variables/arrays * dx - grid spacing in x direction * dy - grid spacing in y direction *************************************************************/ void driver() { initialize(); time1 = time_stamp(); /* Solve Helmholtz equation */ jacobi(); time2 = time_stamp(); printf("------------------------\n"); printf("Execution time = %f\n", time2 - time1); /* error_check (n,m,alpha,dx,dy,u,f) */ error_check(); } /* * subroutine initialize (n,m,alpha,dx,dy,u,f) ***************************************************** * * Initializes data Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * **************************************************** */ void initialize() { int i, j, xx, yy; //double PI = 3.1415926; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); /* Initialize initial condition and RHS */ #pragma omp parallel for private(xx,yy,j,i) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = (int)(-1.0 + dx * (i - 1)); yy = (int)(-1.0 + dy * (j - 1)); u[i][j] = 0.0; f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy) \ -2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy); } } /* * subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ***************************************************************** * * Subroutine HelmholtzJ Solves poisson equation on rectangular grid assuming : * 1) Uniform discretization in each direction, and (2) Dirichlect boundary * conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions dx,dy Grid * spacing in the X/Y directions alpha Helmholtz eqn. coefficient omega * Relaxation factor f(n,m) Right hand side function u(n,m) Dependent * variable/Solution tol Tolerance for iterative solver maxit Maximum * number of iterations * * Output : u(n,m) - Solution *************************************************************** */ void jacobi() { double omega; int i, j, k; double error, resid, ax, ay, b; //double error_local; //float ta, tb, tc, td, te, ta1, ta2, tb1, tb2, tc1, tc2, td1, td2; //float te1, te2; //float second; omega = relax; /* * Initialize coefficients */ ax = 1.0 / (dx * dx); /* X-direction coef */ ay = 1.0 / (dy * dy); /* Y-direction coef */ b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while ((k <= mits) && (error > tol)) { error = 0.0; /* Copy new solution into old */ #pragma omp parallel { #pragma omp for private(j,i) for (i = 0; i < n; i++) for (j = 0; j < m; j++) uold[i][j] = u[i][j]; #pragma omp for private(resid,j,i) reduction(+:error) nowait for (i = 1; i < (n - 1); i++) for (j = 1; j < (m - 1); j++) { resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) \ +ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b; u[i][j] = uold[i][j] - omega * resid; error = error + resid * resid; } } /* omp end parallel */ /* Error check */ k = k + 1; if (k % 500 == 0) printf("Finished %d iteration.\n", k); error = sqrt(error) / (n * m); } /* End iteration loop */ printf("Total Number of Iterations:%d\n", k); printf("Residual:%E\n", error); } /* * subroutine error_check (n,m,alpha,dx,dy,u,f) implicit none *********************************************************** * * Checks error between numerical and exact solution * ********************************************************** */ void error_check() { int i, j; double xx, yy, temp, error; dx = 2.0 / (n - 1); dy = 2.0 / (m - 1); error = 0.0; #pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error) for (i = 0; i < n; i++) for (j = 0; j < m; j++) { xx = -1.0 + dx * (i - 1); yy = -1.0 + dy * (j - 1); 
temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy); error = error + temp * temp; } error = sqrt(error) / (n * m); printf("Solution Error :%E \n", error); }
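Both builds of this file hinge on the #ifdef _OPENMP guard used in main(); in isolation the idiom looks like this (a minimal sketch, compilable with or without -fopenmp):

#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main(void)
{
#ifdef _OPENMP
#pragma omp parallel
    {
#pragma omp single  /* exactly one thread reports the team size */
        printf("Running using %d threads...\n", omp_get_num_threads());
    }
#else
    printf("Serial build (no OpenMP).\n");
#endif
    return 0;
}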
pmv-OpenMP-b_atcgrid.c
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free() #include <stdio.h> // biblioteca donde se encuentra la función printf() #ifdef _OPENMP #include <omp.h> #else #define omp_set_dynamic(0); #define omp_set_num_threads(12); #endif int main(int argc, char ** argv){ int **M; int *v1, *v2; int i, k, N; double cgt1, cgt2, ncgt; //para tiempo de ejecución time_t t; // Semilla de rand() srand((unsigned) time(&t)); // Obtenemos el numero de filas x columnas de la matriz cuadrada if(argc < 2){ fprintf(stderr,"Falta iteraciones\n"); exit(-1); } N = atoi(argv[1]); // == Reserva de Memoria // ====================================================> v1 = (int *) malloc(N*sizeof(int)); v2 = (int *) malloc(N*sizeof(int)); if ( v1 == NULL || v2 == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } M = (int**) malloc (N*sizeof(int*)); // i como private en un for establece que cada hebra tendra una copia de i, pero en parallel for tendra cada una i como sigue // i = 0, i = 3, i = 6 para un bucle de N = 9 #pragma omp parallel for shared(M,N) private(i) default(none) for(i = 0; i<N; i++){ M[i] = (int*) malloc (N*sizeof(int)); if( M[i] == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } } // == Inicializacion // ====================================================> // M, v1, v2, N, i compartidas // Cada hebra se encargará de una parte del bucle usando i // k es privada // Para que cada hebra que este calculando la parte iesima del bucle y tenga una copia de k = 0 propia, parte k es secuencial for(i = 0; i<N; i++){ #pragma omp parallel for shared(M,i,N) private(k) default(none) for(k = 0; k<N; k++) M[i][k] = rand() % 8; } #pragma omp parallel for shared(v1,v2,N) private(i) default(none) for(i = 0; i<N; i++){ v1[i] = rand() % 6; v2[i] = 0; } // == Calculo // ====================================================> cgt1 = omp_get_wtime(); // Dejamos el vector resultado v2 lo dejo como shared para que todas las hebras puedan acceder a el sin necesidad de tener una copia // local, pero los accesos a ese vector las hebras lo tienen que hacer de forma atomica sin interfoliaciones. for(i = 0; i<N; i++){ #pragma omp parallel shared(M,i,N,v2,v1) private(k) default(none) { int sumalocal = 0; #pragma omp for for(k = 0; k<N; k++) sumalocal += M[i][k] * v1[k]; #pragma omp atomic v2[i] += sumalocal; } } cgt2 = omp_get_wtime(); ncgt = (double)(cgt2 - cgt1); // == Imprimir Mensajes // ====================================================> printf("Tiempo(seg.):%11.9f\n", ncgt); printf("Tamaño de los vectores: %u\n", N); printf("\tv1 = %uElem -> %lu bytes\n\tv2 = %uElem -> %lu bytes\n", N, N*sizeof(int), N, N*sizeof(int)); printf("Tamaño de la matriz: %ux%u -> %lu bytes\n", N, N, N*N*sizeof(int)); // Imprimir el primer y último componente del resultado evita que las optimizaciones del compilador // eliminen el código de la suma. printf("v2[0] = %u ... 
v2[N-1] = %u \n", v2[0], v2[N-1]); // For small sizes, N < 15, print the computed values if(N < 15){ printf("\n----------- Matrix M ----------- \n"); for(i = 0; i<N; i++){ for(k = 0; k<N; k++) printf("%u\t", M[i][k]); printf("\n"); } printf("\n----------- Vector V1 ----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v1[i]); printf("\n"); printf("\n----------- Vector V2----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v2[i]); printf("\n"); } // == Free Memory // ====================================================> free(v1); free(v2); #pragma omp parallel for shared(M,N) private(i) default(none) for(i = 0; i<N; i++) free(M[i]); free(M); }
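The comments above motivate keeping a per-thread sumalocal and committing it with one atomic update. An equivalent formulation (a sketch, not from the original file) lets the reduction clause do that combining:

/* Hypothetical sketch: inner product of one matrix row with v1 using a
 * reduction instead of a manual local sum plus an atomic update. */
int row_times_vector(const int *row, const int *v1, int N)
{
    int k, suma = 0;
#pragma omp parallel for reduction(+:suma)
    for (k = 0; k < N; k++)
        suma += row[k] * v1[k];
    return suma;
}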
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free() #include <stdio.h> // biblioteca donde se encuentra la función printf() int main(int argc, char ** argv){ int **M; int *v1, *v2; int i, k, N; double cgt1, cgt2, ncgt; //para tiempo de ejecución time_t t; // Semilla de rand() srand((unsigned) time(&t)); // Obtenemos el numero de filas x columnas de la matriz cuadrada if(argc < 2){ fprintf(stderr,"Falta iteraciones\n"); exit(-1); } N = atoi(argv[1]); // == Reserva de Memoria // ====================================================> v1 = (int *) malloc(N*sizeof(int)); v2 = (int *) malloc(N*sizeof(int)); if ( v1 == NULL || v2 == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } M = (int**) malloc (N*sizeof(int*)); // i como private en un for establece que cada hebra tendra una copia de i, pero en parallel for tendra cada una i como sigue // i = 0, i = 3, i = 6 para un bucle de N = 9 for(i = 0; i<N; i++){ M[i] = (int*) malloc (N*sizeof(int)); if( M[i] == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } } // == Inicializacion // ====================================================> // M, v1, v2, N, i compartidas // Cada hebra se encargará de una parte del bucle usando i // k es privada // Para que cada hebra que este calculando la parte iesima del bucle y tenga una copia de k = 0 propia, parte k es secuencial for(i = 0; i<N; i++){ for(k = 0; k<N; k++) M[i][k] = rand() % 8; } for(i = 0; i<N; i++){ v1[i] = rand() % 6; v2[i] = 0; } // == Calculo // ====================================================> cgt1 = omp_get_wtime(); // Dejamos el vector resultado v2 lo dejo como shared para que todas las hebras puedan acceder a el sin necesidad de tener una copia // local, pero los accesos a ese vector las hebras lo tienen que hacer de forma atomica sin interfoliaciones. for(i = 0; i<N; i++){ int sumalocal = 0; for(k = 0; k<N; k++) sumalocal += M[i][k] * v1[k]; v2[i] += sumalocal; } cgt2 = omp_get_wtime(); ncgt = (double)(cgt2 - cgt1); // == Imprimir Mensajes // ====================================================> printf("Tiempo(seg.):%11.9f\n", ncgt); printf("Tamaño de los vectores: %u\n", N); printf("\tv1 = %uElem -> %lu bytes\n\tv2 = %uElem -> %lu bytes\n", N, N*sizeof(int), N, N*sizeof(int)); printf("Tamaño de la matriz: %ux%u -> %lu bytes\n", N, N, N*N*sizeof(int)); // Imprimir el primer y último componente del resultado evita que las optimizaciones del compilador // eliminen el código de la suma. printf("v2[0] = %u ... v2[N-1] = %u \n", v2[0], v2[N-1]); // Para tamaños pequeños de N < 15 mostrar los valores calculados if(N < 15){ printf("\n----------- Matriz M ----------- \n"); for(i = 0; i<N; i++){ for(k = 0; k<N; k++) printf("%u\t", M[i][k]); printf("\n"); } printf("\n----------- Vector V1 ----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v1[i]); printf("\n"); printf("\n----------- Vector V2----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v2[i]); printf("\n"); } // == Liberar Memoria // ====================================================> free(v1); free(v2); for(i = 0; i<N; i++) free(M[i]); free(M); }
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free() #include <stdio.h> // biblioteca donde se encuentra la función printf() #ifdef _OPENMP #include <omp.h> #else #define omp_set_dynamic(0); #define omp_set_num_threads(12); #endif int main(int argc, char ** argv){ int **M; int *v1, *v2; int i, k, N; double cgt1, cgt2, ncgt; //para tiempo de ejecución time_t t; // Semilla de rand() srand((unsigned) time(&t)); // Obtenemos el numero de filas x columnas de la matriz cuadrada if(argc < 2){ fprintf(stderr,"Falta iteraciones\n"); exit(-1); } N = atoi(argv[1]); // == Reserva de Memoria // ====================================================> v1 = (int *) malloc(N*sizeof(int)); v2 = (int *) malloc(N*sizeof(int)); if ( v1 == NULL || v2 == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } M = (int**) malloc (N*sizeof(int*)); // i como private en un for establece que cada hebra tendra una copia de i, pero en parallel for tendra cada una i como sigue // i = 0, i = 3, i = 6 para un bucle de N = 9 #pragma omp parallel for shared(M,N) private(i) default(none) for(i = 0; i<N; i++){ M[i] = (int*) malloc (N*sizeof(int)); if( M[i] == NULL ){ printf("Error en la reserva de espacio para los vectores\n"); exit(-2); } } // == Inicializacion // ====================================================> // M, v1, v2, N, i compartidas // Cada hebra se encargará de una parte del bucle usando i // k es privada // Para que cada hebra que este calculando la parte iesima del bucle y tenga una copia de k = 0 propia, parte k es secuencial for(i = 0; i<N; i++){ #pragma omp parallel for shared(M,i,N) private(k) default(none) for(k = 0; k<N; k++) M[i][k] = rand() % 8; } #pragma omp parallel for shared(v1,v2,N) private(i) default(none) for(i = 0; i<N; i++){ v1[i] = rand() % 6; v2[i] = 0; } // == Calculo // ====================================================> cgt1 = omp_get_wtime(); // Dejamos el vector resultado v2 lo dejo como shared para que todas las hebras puedan acceder a el sin necesidad de tener una copia // local, pero los accesos a ese vector las hebras lo tienen que hacer de forma atomica sin interfoliaciones. for(i = 0; i<N; i++){ #pragma omp parallel shared(M,i,N,v2,v1) private(k) default(none) { int sumalocal = 0; #pragma omp for for(k = 0; k<N; k++) sumalocal += M[i][k] * v1[k]; #pragma omp atomic v2[i] += sumalocal; } } cgt2 = omp_get_wtime(); ncgt = (double)(cgt2 - cgt1); // == Imprimir Mensajes // ====================================================> printf("Tiempo(seg.):%11.9f\n", ncgt); printf("Tamaño de los vectores: %u\n", N); printf("\tv1 = %uElem -> %lu bytes\n\tv2 = %uElem -> %lu bytes\n", N, N*sizeof(int), N, N*sizeof(int)); printf("Tamaño de la matriz: %ux%u -> %lu bytes\n", N, N, N*N*sizeof(int)); // Imprimir el primer y último componente del resultado evita que las optimizaciones del compilador // eliminen el código de la suma. printf("v2[0] = %u ... 
v2[N-1] = %u \n", v2[0], v2[N-1]); // For small sizes, N < 15, print the computed values if(N < 15){ printf("\n----------- Matrix M ----------- \n"); for(i = 0; i<N; i++){ for(k = 0; k<N; k++) printf("%u\t", M[i][k]); printf("\n"); } printf("\n----------- Vector V1 ----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v1[i]); printf("\n"); printf("\n----------- Vector V2----------- \n"); for(i = 0; i<N; i++) printf("%u\t", v2[i]); printf("\n"); } // == Free Memory // ====================================================> free(v1); free(v2); #pragma omp parallel for shared(M,N) private(i) default(none) for(i = 0; i<N; i++) free(M[i]); free(M); }
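For comparison, a sketch of an alternative layout (not the file's approach) that parallelizes over rows instead of within each row; each thread then owns whole entries of v2, so no atomic is required:

/* Hypothetical sketch: row-parallel matrix-vector product. */
void matvec_rows(int **M, const int *v1, int *v2, int N)
{
    int i, k;
#pragma omp parallel for private(k)
    for (i = 0; i < N; i++) {
        int suma = 0;                 /* private by scope */
        for (k = 0; k < N; k++)
            suma += M[i][k] * v1[k];
        v2[i] = suma;                 /* each thread writes distinct rows */
    }
}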
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-4,8),ceild(4*t2-Nz-3,16));t3<=min(min(floord(4*Nt+Ny-9,16),floord(2*t1+Ny-3,16)),floord(4*t2+Ny-9,16));t3++) { for (t4=max(max(ceild(t1-60,64),ceild(4*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(floord(4*Nt+Nx-9,128),floord(2*t1+Nx-3,128)),floord(4*t2+Nx-9,128)),floord(16*t3+Nx+3,128));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
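The tiled CLooG loops are hard to read; the computation they carry out is the order-4 (25-point) wave-equation step below. A reference sketch (the function wrapper is hypothetical; A, roc2, the coefficients, and the ghost width of 4 match the benchmark above):

/* Coefficients as in the benchmark above. */
static const double coefs[5] = {-0.28472, 0.16000, -0.02000, 0.00254, -0.00018};

/* One untiled time step: A[(t+1)%2] is overwritten from A[t%2]. */
void stencil_step(double ****A, double ***roc2, int Nz, int Ny, int Nx, int t)
{
    int i, j, k, r;
#pragma omp parallel for private(j, k, r)
    for (i = 4; i < Nz - 4; i++)
        for (j = 4; j < Ny - 4; j++)
            for (k = 4; k < Nx - 4; k++) {
                double lap = coefs[0] * A[t % 2][i][j][k];
                for (r = 1; r <= 4; r++)   /* six neighbors at each radius */
                    lap += coefs[r] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]
                                     + A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]
                                     + A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
                A[(t + 1) % 2][i][j][k] = 2.0 * A[t % 2][i][j][k]
                                        - A[(t + 1) % 2][i][j][k]
                                        + roc2[i][j][k] * lap;
            }
}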
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
* * The GNU C Library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * The GNU C Library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with the GNU C Library; if not, see * <http://www.gnu.org/licenses/>. */ /* * This header is separate from features.h so that the compiler can * include it implicitly at the start of every compilation. It must * not itself include <features.h> or any other header that includes * <features.h> because the implicit include comes before any feature * test macros that may be defined in a source file before it first * explicitly includes a system header. GCC knows the name of this * header in order to preinclude it. */ /* * glibc's intent is to support the IEC 559 math functionality, real * and complex. If the GCC (4.9 and later) predefined macros * specifying compiler intent are available, use them to determine * whether the overall intent is to support these features; * otherwise, presume an older compiler has intent to support these * features and define these macros by default. */ /* * wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / * Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= 2 * Nt - 2; t1++) { lbp = ceild(t1 + 2, 2); ubp = min(floord(4 * Nt + Nz - 9, 4), floord(2 * t1 + Nz - 4, 4)); for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1 - 4, 8), ceild(4 * t2 - Nz - 3, 16)); t3 <= min(min(floord(4 * Nt + Ny - 9, 16), floord(2 * t1 + Ny - 3, 16)), floord(4 * t2 + Ny - 9, 16)); t3++) { for (t4 = max(max(ceild(t1 - 60, 64), ceild(4 * t2 - Nz - 115, 128)), ceild(16 * t3 - Ny - 115, 128)); t4 <= min(min(min(floord(4 * Nt + Nx - 9, 128), floord(2 * t1 + Nx - 3, 128)), floord(4 * t2 + Nx - 9, 128)), floord(16 * t3 + Nx + 3, 128)); t4++) { for (t5 = max(max(max(ceild(t1, 2), ceild(4 * t2 - Nz + 5, 4)), ceild(16 * t3 - Ny + 5, 4)), ceild(128 * t4 - Nx + 5, 4)); t5 <= floord(t1 + 1, 2); t5++) { for (t6 = max(4 * t2, -4 * t1 + 4 * t2 + 8 * t5 - 3); t6 <= min(min(4 * t2 + 3, -4 * t1 + 4 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(16 * t3, 4 * t5 + 4); t7 <= min(16 * t3 + 15, 4 * t5 + Ny - 5); t7++) { lbv = max(128 * t4, 4 * t5 + 4); ubv = min(128 * t4 + 127, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((2.0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) - A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (roc2[(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (((((coef0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef1 * (((((A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + 
t7)][(-4 * t5 + t8) - 1]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef2 * (((((A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef3 * (((((A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef4 * (((((A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
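The serial variant above calls the LIKWID markers from a single thread, while the OpenMP variant that follows wraps them in parallel regions so every thread registers with the profiler. A minimal sketch of that threaded idiom (the no-op fallback macros are an assumption for building without LIKWID):

#ifdef LIKWID_PERFMON
#include <likwid.h>
#else /* assumed no-op fallbacks so the sketch builds without LIKWID */
#define LIKWID_MARKER_INIT
#define LIKWID_MARKER_THREADINIT
#define LIKWID_MARKER_START(tag)
#define LIKWID_MARKER_STOP(tag)
#define LIKWID_MARKER_CLOSE
#endif

/* Hypothetical wrapper: profile one kernel under the "calc" marker. */
void measured_region(void (*kernel)(void))
{
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;     /* every thread registers itself */
        LIKWID_MARKER_START("calc");
    }
    kernel();
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
}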
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****)malloc(sizeof(double ***) * 2); double ***roc2 = (double ***)malloc(sizeof(double **)); A[0] = (double ***)malloc(sizeof(double **) * Nz); A[1] = (double ***)malloc(sizeof(double **) * Nz); roc2 = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[0][i] = (double **)malloc(sizeof(double *) * Ny); A[1][i] = (double **)malloc(sizeof(double *) * Ny); roc2[i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[0][i][j] = (double *)malloc(sizeof(double) * Nx); A[1][i][j] = (double *)malloc(sizeof(double) * Nx); roc2[i][j] = (double *)malloc(sizeof(double) * Nx); } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source - to - source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 /* * Copyright (C) 1991-2014 Free Software Foundation, Inc. This file * is part of the GNU C Library. 
int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1 = -1; t1 <= 2 * Nt - 2; t1++) { lbp = ceild(t1 + 2, 2); ubp = min(floord(4 * Nt + Nz - 9, 4), floord(2 * t1 + Nz - 4, 4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2 = lbp; t2 <= ubp; t2++) { for (t3 = max(ceild(t1 - 4, 8), ceild(4 * t2 - Nz - 3, 16)); t3 <= min(min(floord(4 * Nt + Ny - 9, 16), floord(2 * t1 + Ny - 3, 16)), floord(4 * t2 + Ny - 9, 16)); t3++) { for (t4 = max(max(ceild(t1 - 60, 64), ceild(4 * t2 - Nz - 115, 128)), ceild(16 * t3 - Ny - 115, 128)); t4 <= min(min(min(floord(4 * Nt + Nx - 9, 128), floord(2 * t1 + Nx - 3, 128)), floord(4 * t2 + Nx - 9, 128)), floord(16 * t3 + Nx + 3, 128)); t4++) { for (t5 = max(max(max(ceild(t1, 2), ceild(4 * t2 - Nz + 5, 4)), ceild(16 * t3 - Ny + 5, 4)), ceild(128 * t4 - Nx + 5, 4)); t5 <= floord(t1 + 1, 2); t5++) { for (t6 = max(4 * t2, -4 * t1 + 4 * t2 + 8 * t5 - 3); t6 <= min(min(4 * t2 + 3, -4 * t1 + 4 * t2 + 8 * t5), 4 * t5 + Nz - 5); t6++) { for (t7 = max(16 * t3, 4 * t5 + 4); t7 <= min(16 * t3 + 15, 4 * t5 + Ny - 5); t7++) { lbv = max(128 * t4, 4 * t5 + 4); ubv = min(128 * t4 + 127, 4 * t5 + Nx - 5); #pragma ivdep #pragma vector always for (t8 = lbv; t8 <= ubv; t8++) { A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] = (((2.0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) - A[(t5 + 1) % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (roc2[(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)] * (((((coef0 * A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8)]) + (coef1 * (((((A[t5 % 2][(-4 * t5 + t6) - 1][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 1][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) +
1][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 1]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 1]))) + (coef2 * (((((A[t5 % 2][(-4 * t5 + t6) - 2][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 2][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 2][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 2]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 2]))) + (coef3 * (((((A[t5 % 2][(-4 * t5 + t6) - 3][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 3][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 3][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 3]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 3]))) + (coef4 * (((((A[t5 % 2][(-4 * t5 + t6) - 4][(-4 * t5 + t7)][(-4 * t5 + t8)] + A[t5 % 2][(-4 * t5 + t6) + 4][(-4 * t5 + t7)][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) - 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7) + 4][(-4 * t5 + t8)]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) - 4]) + A[t5 % 2][(-4 * t5 + t6)][(-4 * t5 + t7)][(-4 * t5 + t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
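Both variants keep only two time levels of A and ping-pong between them through the t5 % 2 / (t5 + 1) % 2 indices: the buffer that held the solution at t-1 is overwritten in place with the solution at t+1, via the second-order-in-time update A_next = 2*A_cur - A_prev + roc2 * stencil(A_cur). A stripped-down 1D, order-1 sketch of the same scheme (size, step count, and the roc2 value here are illustrative, not taken from the benchmark):

#include <stdio.h>

#define N  16
#define NT 4

int main(void)
{
    static double A[2][N];      /* two time levels, reused alternately */
    double roc2 = 0.25;         /* plays the role of (c*dt/dx)^2 here  */

    for (int i = 0; i < N; i++) A[0][i] = A[1][i] = (i == N / 2);

    for (int t = 0; t < NT; t++) {
        for (int i = 1; i < N - 1; i++) {
            /* A[(t+1)%2][i] holds u(t-1) on entry and u(t+1) on exit */
            A[(t + 1) % 2][i] = 2.0 * A[t % 2][i] - A[(t + 1) % 2][i]
                + roc2 * (A[t % 2][i - 1] - 2.0 * A[t % 2][i] + A[t % 2][i + 1]);
        }
    }
    for (int i = 0; i < N; i++) printf("%g ", A[NT % 2][i]);
    printf("\n");
    return 0;
}

The tiled loop nest in the files above computes exactly this update; the t1..t8 bounds are the CLooG-generated skewed tiling of the (t, z, y, x) iteration space, which is why every spatial index appears as (-4 * t5 + ...).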
kernel.h
void malvar_he_cutler_demosaic ( const uint teamX, const uint teamY, const uint height, const uint width, const uchar *__restrict__ input_image_p, const uint input_image_pitch, uchar *__restrict__ output_image_p, const uint output_image_pitch, const int bayer_pattern ) { #pragma omp target teams num_teams(teamX * teamY) thread_limit(tile_cols*tile_rows) { LDSPixelT apron[apron_rows * apron_cols]; #pragma omp parallel { const uint tile_col_blocksize = tile_cols; const uint tile_row_blocksize = tile_rows; const uint tile_col_block = omp_get_team_num() % teamX; const uint tile_row_block = omp_get_team_num() / teamX; const uint tile_col = omp_get_thread_num() % tile_cols; const uint tile_row = omp_get_thread_num() / tile_cols; const uint g_c = tile_col_blocksize * tile_col_block + tile_col; const uint g_r = tile_row_blocksize * tile_row_block + tile_row; const bool valid_pixel_task = (g_r < height) & (g_c < width); const uint tile_flat_id = tile_row * tile_cols + tile_col; for(uint apron_fill_task_id = tile_flat_id; apron_fill_task_id < n_apron_fill_tasks; apron_fill_task_id += n_tile_pixels){ const uint apron_read_row = apron_fill_task_id / apron_cols; const uint apron_read_col = apron_fill_task_id % apron_cols; const int ag_c = ((int)(apron_read_col + tile_col_block * tile_col_blocksize)) - shalf_ksize; const int ag_r = ((int)(apron_read_row + tile_row_block * tile_row_blocksize)) - shalf_ksize; apron[apron_read_row * apron_cols + apron_read_col] = tex2D_at(PixelT, input_image, ag_r, ag_c); } #pragma omp barrier //valid tasks read from [half_ksize, (tile_rows|tile_cols) + kernel_size - 1) const uint a_c = tile_col + half_ksize; const uint a_r = tile_row + half_ksize; assert_val(a_c >= half_ksize && a_c < apron_cols - half_ksize, a_c); assert_val(a_r >= half_ksize && a_r < apron_rows - half_ksize, a_r); //note the following formulas are col, row convention and uses i,j - this is done to preserve readability with the originating paper const uint i = a_c; const uint j = a_r; #define F(_i, _j) apron_pixel((_j), (_i)) const int Fij = F(i,j); //symmetric 4,2,-1 response - cross const int R1 = (4*F(i, j) + 2*(F(i-1,j) + F(i,j-1) + F(i+1,j) + F(i,j+1)) - F(i-2,j) - F(i+2,j) - F(i,j-2) - F(i,j+2)) / 8; //left-right symmetric response - with .5,1,4,5 - theta const int R2 = ( 8*(F(i-1,j) + F(i+1,j)) +10*F(i,j) + F(i,j-2) + F(i,j+2) - 2*((F(i-1,j-1) + F(i+1,j-1) + F(i-1,j+1) + F(i+1,j+1)) + F(i-2,j) + F(i+2,j))) / 16; //top-bottom symmetric response - with .5,1,4,5 - phi const int R3 = ( 8*(F(i,j-1) + F(i,j+1)) +10*F(i,j) + F(i-2,j) + F(i+2,j) - 2*((F(i-1,j-1) + F(i+1,j-1) + F(i-1,j+1) + F(i+1,j+1)) + F(i,j-2) + F(i,j+2))) / 16; //symmetric 3/2s response - checker const int R4 = ( 12*F(i,j) - 3*(F(i-2,j) + F(i+2,j) + F(i,j-2) + F(i,j+2)) + 4*(F(i-1,j-1) + F(i+1,j-1) + F(i-1,j+1) + F(i+1,j+1))) / 16; const int G_at_red_or_blue = R1; const int R_at_G_in_red = R2; const int B_at_G_in_blue = R2; const int R_at_G_in_blue = R3; const int B_at_G_in_red = R3; const int R_at_B = R4; const int B_at_R = R4; #undef F #undef j #undef i //RGGB -> RedXY = (0, 0), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (1, 1) //GRBG -> RedXY = (1, 0), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (0, 1) //GBRG -> RedXY = (0, 1), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (1, 0) //BGGR -> RedXY = (1, 1), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (0, 0) const int r_mod_2 = g_r & 1; const int c_mod_2 = g_c & 1; #define is_rggb (bayer_pattern == RGGB) #define is_grbg (bayer_pattern == GRBG) #define is_gbrg (bayer_pattern 
== GBRG) #define is_bggr (bayer_pattern == BGGR) const int red_col = is_grbg | is_bggr; const int red_row = is_gbrg | is_bggr; const int blue_col = 1 - red_col; const int blue_row = 1 - red_row; const int in_red_row = r_mod_2 == red_row; const int in_blue_row = r_mod_2 == blue_row; const int is_red_pixel = (r_mod_2 == red_row) & (c_mod_2 == red_col); const int is_blue_pixel = (r_mod_2 == blue_row) & (c_mod_2 == blue_col); const int is_green_pixel = !(is_red_pixel | is_blue_pixel); assert(is_green_pixel + is_blue_pixel + is_red_pixel == 1); assert(in_red_row + in_blue_row == 1); //at R locations: R is original //at B locations it is the 3/2s symmetric response //at G in red rows it is the left-right symmetric with 4s //at G in blue rows it is the top-bottom symmetric with 4s const RGBPixelBaseT R = output_pixel_cast( Fij * is_red_pixel + R_at_B * is_blue_pixel + R_at_G_in_red * (is_green_pixel & in_red_row) + R_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at B locations: B is original //at R locations it is the 3/2s symmetric response //at G in red rows it is the top-bottom symmetric with 4s //at G in blue rows it is the left-right symmetric with 4s const RGBPixelBaseT B = output_pixel_cast( Fij * is_blue_pixel + B_at_R * is_red_pixel + B_at_G_in_red * (is_green_pixel & in_red_row) + B_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at G locations: G is original //at R locations: symmetric 4,2,-1 //at B locations: symmetric 4,2,-1 const RGBPixelBaseT G = output_pixel_cast(Fij * is_green_pixel + G_at_red_or_blue * (!is_green_pixel)); if(valid_pixel_task){ RGBPixelT output; #if OUTPUT_CHANNELS == 3 || OUTPUT_CHANNELS == 4 output.x = R; output.y = G; output.z = B; #if OUTPUT_CHANNELS == 4 output.w = ALPHA_VALUE; #endif #else #error "Unsupported number of output channels" #endif pixel_at(RGBPixelT, output_image, g_r, g_c) = output; } } } }
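The R1 expression above is the Malvar-He-Cutler green-at-red/blue estimator: weight 4 at the center, 2 at the four axial neighbors, -1 at distance two along each axis, normalized by 8 (the weights sum to 8). Written out as a 5x5 convolution mask for reference (the array name is illustrative; the kernel computes this sum inline rather than via a mask):

/* G at R/B sites: result = (sum of w[dy][dx] * F(i+dx, j+dy)) / 8 */
static const int G_AT_RB[5][5] = {
    {  0, 0, -1, 0,  0 },
    {  0, 0,  2, 0,  0 },
    { -1, 2,  4, 2, -1 },
    {  0, 0,  2, 0,  0 },
    {  0, 0, -1, 0,  0 },
};

R2, R3, and R4 are the corresponding 5x5 masks for the remaining cases (chroma at green in a red/blue row, and chroma at the opposite chroma site), each normalized by 16.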
void malvar_he_cutler_demosaic( const uint teamX, const uint teamY, const uint height, const uint width, const uchar * __restrict__ input_image_p, const uint input_image_pitch, uchar * __restrict__ output_image_p, const uint output_image_pitch, const int bayer_pattern) { LDSPixelT apron[apron_rows * apron_cols]; #pragma omp parallel { const uint tile_col_blocksize = tile_cols; const uint tile_row_blocksize = tile_rows; const uint tile_col_block = omp_get_team_num() % teamX; const uint tile_row_block = omp_get_team_num() / teamX; const uint tile_col = omp_get_thread_num() % tile_cols; const uint tile_row = omp_get_thread_num() / tile_cols; const uint g_c = tile_col_blocksize * tile_col_block + tile_col; const uint g_r = tile_row_blocksize * tile_row_block + tile_row; const bool valid_pixel_task = (g_r < height) & (g_c < width); const uint tile_flat_id = tile_row * tile_cols + tile_col; for (uint apron_fill_task_id = tile_flat_id; apron_fill_task_id < n_apron_fill_tasks; apron_fill_task_id += n_tile_pixels) { const uint apron_read_row = apron_fill_task_id / apron_cols; const uint apron_read_col = apron_fill_task_id % apron_cols; const int ag_c = ((int)(apron_read_col + tile_col_block * tile_col_blocksize)) - shalf_ksize; const int ag_r = ((int)(apron_read_row + tile_row_block * tile_row_blocksize)) - shalf_ksize; apron[apron_read_row * apron_cols + apron_read_col] = tex2D_at(PixelT, input_image, ag_r, ag_c); } //valid tasks read from[half_ksize, (tile_rows | tile_cols) + kernel_size - 1) const uint a_c = tile_col + half_ksize; const uint a_r = tile_row + half_ksize; assert_val(a_c >= half_ksize && a_c < apron_cols - half_ksize, a_c); assert_val(a_r >= half_ksize && a_r < apron_rows - half_ksize, a_r); //note the following formulas are col, row convention and uses i, j - this is done to preserve readability with the originating paper const uint i = a_c; const uint j = a_r; #define F(_i, _j) apron_pixel((_j), (_i)) const int Fij = F(i, j); //symmetric 4, 2, -1 response - cross const int R1 = (4 * F(i, j) + 2 * (F(i - 1, j) + F(i, j - 1) + F(i + 1, j) + F(i, j + 1)) - F(i - 2, j) - F(i + 2, j) - F(i, j - 2) - F(i, j + 2)) / 8; //left - right symmetric response - with .5, 1, 4, 5 - theta const int R2 = ( 8 * (F(i - 1, j) + F(i + 1, j)) + 10 * F(i, j) + F(i, j - 2) + F(i, j + 2) - 2 * ((F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1)) + F(i - 2, j) + F(i + 2, j))) / 16; //top - bottom symmetric response - with .5, 1, 4, 5 - phi const int R3 = ( 8 * (F(i, j - 1) + F(i, j + 1)) + 10 * F(i, j) + F(i - 2, j) + F(i + 2, j) - 2 * ((F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1)) + F(i, j - 2) + F(i, j + 2))) / 16; //symmetric 3 / 2 s response - checker const int R4 = ( 12 * F(i, j) - 3 * (F(i - 2, j) + F(i + 2, j) + F(i, j - 2) + F(i, j + 2)) + 4 * (F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1))) / 16; const int G_at_red_or_blue = R1; const int R_at_G_in_red = R2; const int B_at_G_in_blue = R2; const int R_at_G_in_blue = R3; const int B_at_G_in_red = R3; const int R_at_B = R4; const int B_at_R = R4; #undef F #undef j #undef i //RGGB->RedXY = (0, 0), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (1, 1) // GRBG->RedXY = (1, 0), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (0, 1) // GBRG->RedXY = (0, 1), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (1, 0) // BGGR->RedXY = (1, 1), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (0, 0) const int r_mod_2 = g_r & 1; const int c_mod_2 = g_c & 1; #define is_rggb (bayer_pattern == RGGB) 
#define is_grbg (bayer_pattern == GRBG) #define is_gbrg (bayer_pattern == GBRG) #define is_bggr (bayer_pattern == BGGR) const int red_col = is_grbg | is_bggr; const int red_row = is_gbrg | is_bggr; const int blue_col = 1 - red_col; const int blue_row = 1 - red_row; const int in_red_row = r_mod_2 == red_row; const int in_blue_row = r_mod_2 == blue_row; const int is_red_pixel = (r_mod_2 == red_row) & (c_mod_2 == red_col); const int is_blue_pixel = (r_mod_2 == blue_row) & (c_mod_2 == blue_col); const int is_green_pixel = !(is_red_pixel | is_blue_pixel); assert(is_green_pixel + is_blue_pixel + is_red_pixel == 1); assert(in_red_row + in_blue_row == 1); //at R locations:R is original // at B locations it is the 3 / 2 s symmetric response // at G in red rows it is the left - right symmetric with 4 s // at G in blue rows it is the top - bottom symmetric with 4 s const RGBPixelBaseT R = output_pixel_cast( Fij * is_red_pixel + R_at_B * is_blue_pixel + R_at_G_in_red * (is_green_pixel & in_red_row) + R_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at B locations:B is original // at R locations it is the 3 / 2 s symmetric response // at G in red rows it is the top - bottom symmetric with 4 s // at G in blue rows it is the left - right symmetric with 4 s const RGBPixelBaseT B = output_pixel_cast( Fij * is_blue_pixel + B_at_R * is_red_pixel + B_at_G_in_red * (is_green_pixel & in_red_row) + B_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at G locations:G is original // at R locations:symmetric 4, 2, -1 // at B locations:symmetric 4, 2, -1 const RGBPixelBaseT G = output_pixel_cast(Fij * is_green_pixel + G_at_red_or_blue * (!is_green_pixel)); if (valid_pixel_task) { RGBPixelT output; #if OUTPUT_CHANNELS == 3 || OUTPUT_CHANNELS == 4 output.x = R; output.y = G; output.z = B; #if OUTPUT_CHANNELS == 4 output.w = ALPHA_VALUE; #endif #else #error "Unsupported number of output channels" #endif pixel_at(RGBPixelT, output_image, g_r, g_c) = output; } } }
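The apron-fill loop in all three variants is a cooperative halo load: the tile's threads stride flat task ids across the whole (tile + 2*half_ksize border) region, so every later 5x5 read hits the local buffer. A serial sketch of the same index arithmetic (all names here are illustrative; the original reads through tex2D_at, which is assumed to handle out-of-range coordinates, so the clamping below stands in for that):

/* Fill an (R+2h) x (C+2h) apron for the tile at origin (r0, c0),
 * clamping reads to the image border. Team threads would stride
 * the task loop instead of one thread running it whole. */
void fill_apron(const unsigned char *img, int H, int W,
                int r0, int c0, int R, int C, int h,
                unsigned char *apron)
{
    int AR = R + 2 * h, AC = C + 2 * h;
    for (int task = 0; task < AR * AC; task++) {
        int ar = task / AC, ac = task % AC;       /* apron coordinates  */
        int gr = r0 + ar - h, gc = c0 + ac - h;   /* global coordinates */
        if (gr < 0) gr = 0; if (gr >= H) gr = H - 1;
        if (gc < 0) gc = 0; if (gc >= W) gc = W - 1;
        apron[ar * AC + ac] = img[gr * W + gc];
    }
}

Note that the host-only variant above drops the #pragma omp barrier after the fill loop along with the other pragmas; the offloaded variant below keeps it, because there the fill and the compute phases run on different threads of the same team.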
void malvar_he_cutler_demosaic( const uint teamX, const uint teamY, const uint height, const uint width, const uchar * __restrict__ input_image_p, const uint input_image_pitch, uchar * __restrict__ output_image_p, const uint output_image_pitch, const int bayer_pattern) { #pragma omp target teams num_teams(teamX * teamY) thread_limit(tile_cols*tile_rows) { LDSPixelT apron[apron_rows * apron_cols]; #pragma omp parallel { const uint tile_col_blocksize = tile_cols; const uint tile_row_blocksize = tile_rows; const uint tile_col_block = omp_get_team_num() % teamX; const uint tile_row_block = omp_get_team_num() / teamX; const uint tile_col = omp_get_thread_num() % tile_cols; const uint tile_row = omp_get_thread_num() / tile_cols; const uint g_c = tile_col_blocksize * tile_col_block + tile_col; const uint g_r = tile_row_blocksize * tile_row_block + tile_row; const bool valid_pixel_task = (g_r < height) & (g_c < width); const uint tile_flat_id = tile_row * tile_cols + tile_col; for (uint apron_fill_task_id = tile_flat_id; apron_fill_task_id < n_apron_fill_tasks; apron_fill_task_id += n_tile_pixels) { const uint apron_read_row = apron_fill_task_id / apron_cols; const uint apron_read_col = apron_fill_task_id % apron_cols; const int ag_c = ((int)(apron_read_col + tile_col_block * tile_col_blocksize)) - shalf_ksize; const int ag_r = ((int)(apron_read_row + tile_row_block * tile_row_blocksize)) - shalf_ksize; apron[apron_read_row * apron_cols + apron_read_col] = tex2D_at(PixelT, input_image, ag_r, ag_c); } #pragma omp barrier //valid tasks read from[half_ksize, (tile_rows | tile_cols) + kernel_size - 1) const uint a_c = tile_col + half_ksize; const uint a_r = tile_row + half_ksize; assert_val(a_c >= half_ksize && a_c < apron_cols - half_ksize, a_c); assert_val(a_r >= half_ksize && a_r < apron_rows - half_ksize, a_r); //note the following formulas are col, row convention and uses i, j - this is done to preserve readability with the originating paper const uint i = a_c; const uint j = a_r; #define F(_i, _j) apron_pixel((_j), (_i)) const int Fij = F(i, j); //symmetric 4, 2, -1 response - cross const int R1 = (4 * F(i, j) + 2 * (F(i - 1, j) + F(i, j - 1) + F(i + 1, j) + F(i, j + 1)) - F(i - 2, j) - F(i + 2, j) - F(i, j - 2) - F(i, j + 2)) / 8; //left - right symmetric response - with .5, 1, 4, 5 - theta const int R2 = ( 8 * (F(i - 1, j) + F(i + 1, j)) + 10 * F(i, j) + F(i, j - 2) + F(i, j + 2) - 2 * ((F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1)) + F(i - 2, j) + F(i + 2, j))) / 16; //top - bottom symmetric response - with .5, 1, 4, 5 - phi const int R3 = ( 8 * (F(i, j - 1) + F(i, j + 1)) + 10 * F(i, j) + F(i - 2, j) + F(i + 2, j) - 2 * ((F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1)) + F(i, j - 2) + F(i, j + 2))) / 16; //symmetric 3 / 2 s response - checker const int R4 = ( 12 * F(i, j) - 3 * (F(i - 2, j) + F(i + 2, j) + F(i, j - 2) + F(i, j + 2)) + 4 * (F(i - 1, j - 1) + F(i + 1, j - 1) + F(i - 1, j + 1) + F(i + 1, j + 1))) / 16; const int G_at_red_or_blue = R1; const int R_at_G_in_red = R2; const int B_at_G_in_blue = R2; const int R_at_G_in_blue = R3; const int B_at_G_in_red = R3; const int R_at_B = R4; const int B_at_R = R4; #undef F #undef j #undef i //RGGB->RedXY = (0, 0), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (1, 1) // GRBG->RedXY = (1, 0), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (0, 1) // GBRG->RedXY = (0, 1), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (1, 0) // BGGR->RedXY = (1, 1), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = 
(0, 0) const int r_mod_2 = g_r & 1; const int c_mod_2 = g_c & 1; #define is_rggb (bayer_pattern == RGGB) #define is_grbg (bayer_pattern == GRBG) #define is_gbrg (bayer_pattern == GBRG) #define is_bggr (bayer_pattern == BGGR) const int red_col = is_grbg | is_bggr; const int red_row = is_gbrg | is_bggr; const int blue_col = 1 - red_col; const int blue_row = 1 - red_row; const int in_red_row = r_mod_2 == red_row; const int in_blue_row = r_mod_2 == blue_row; const int is_red_pixel = (r_mod_2 == red_row) & (c_mod_2 == red_col); const int is_blue_pixel = (r_mod_2 == blue_row) & (c_mod_2 == blue_col); const int is_green_pixel = !(is_red_pixel | is_blue_pixel); assert(is_green_pixel + is_blue_pixel + is_red_pixel == 1); assert(in_red_row + in_blue_row == 1); //at R locations:R is original // at B locations it is the 3 / 2 s symmetric response // at G in red rows it is the left - right symmetric with 4 s // at G in blue rows it is the top - bottom symmetric with 4 s const RGBPixelBaseT R = output_pixel_cast( Fij * is_red_pixel + R_at_B * is_blue_pixel + R_at_G_in_red * (is_green_pixel & in_red_row) + R_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at B locations:B is original // at R locations it is the 3 / 2 s symmetric response // at G in red rows it is the top - bottom symmetric with 4 s // at G in blue rows it is the left - right symmetric with 4 s const RGBPixelBaseT B = output_pixel_cast( Fij * is_blue_pixel + B_at_R * is_red_pixel + B_at_G_in_red * (is_green_pixel & in_red_row) + B_at_G_in_blue * (is_green_pixel & in_blue_row) ); //at G locations:G is original // at R locations:symmetric 4, 2, -1 // at B locations:symmetric 4, 2, -1 const RGBPixelBaseT G = output_pixel_cast(Fij * is_green_pixel + G_at_red_or_blue * (!is_green_pixel)); if (valid_pixel_task) { RGBPixelT output; #if OUTPUT_CHANNELS == 3 || OUTPUT_CHANNELS == 4 output.x = R; output.y = G; output.z = B; #if OUTPUT_CHANNELS == 4 output.w = ALPHA_VALUE; #endif #else #error "Unsupported number of output channels" #endif pixel_at(RGBPixelT, output_image, g_r, g_c) = output; } } } }
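The offloaded variant recovers a CUDA-style grid/block decomposition from OpenMP: num_teams(teamX * teamY) plays the role of the grid, thread_limit(...) the block, and omp_get_team_num() / omp_get_thread_num() are decomposed with % and / into 2D coordinates. A bare, compilable skeleton of just that mapping (the macro values are illustrative; requires an OpenMP 4.5+ compiler, and falls back to the host if no offload target is available):

#include <omp.h>

#define TEAM_X    4
#define TEAM_Y    2
#define TILE_COLS 8
#define TILE_ROWS 8

int main(void)
{
    #pragma omp target teams num_teams(TEAM_X * TEAM_Y) thread_limit(TILE_COLS * TILE_ROWS)
    {
        #pragma omp parallel
        {
            int team = omp_get_team_num();
            int thr  = omp_get_thread_num();
            int block_col = team % TEAM_X, block_row = team / TEAM_X;
            int col = thr % TILE_COLS,     row = thr / TILE_COLS;
            int g_c = block_col * TILE_COLS + col;   /* global pixel column */
            int g_r = block_row * TILE_ROWS + row;   /* global pixel row    */
            (void)g_c; (void)g_r;                    /* per-pixel work goes here */
        }
    }
    return 0;
}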
GB_unop__cos_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__cos_fc64_fc64 // op(A') function: GB_unop_tran__cos_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ccos (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ccos (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ccos (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COS || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__cos_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ccos (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__cos_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__cos_fc64_fc64 // op(A') function: GB_unop_tran__cos_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ccos (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ccos (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ccos (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COS || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__cos_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ccos (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__cos_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__cos_fc64_fc64 // op(A') function: GB_unop_tran__cos_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ccos (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ccos (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ccos (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COS || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__cos_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ccos (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__cos_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
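Stripped of the GraphBLAS scaffolding, the apply kernel above is an embarrassingly parallel elementwise map: GxB_FC64_t is a C99 double complex, and ccos comes from <complex.h>. A self-contained sketch of the same loop shape (function and variable names are illustrative; build with -fopenmp and -lm):

#include <complex.h>
#include <stdio.h>

/* Cx [p] = ccos (Ax [p]); Cx and Ax may be aliased, as in the kernel above. */
static void apply_ccos(double complex *Cx, const double complex *Ax,
                       long n, int nthreads)
{
    long p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < n; p++) {
        Cx[p] = ccos(Ax[p]);
    }
}

int main(void)
{
    double complex v[3] = { 0.0, 1.0 + 1.0 * I, 3.14159265 };
    apply_ccos(v, v, 3, 2);                       /* in place: Cx aliases Ax */
    for (int i = 0; i < 3; i++)
        printf("%g%+gi\n", creal(v[i]), cimag(v[i]));
    return 0;
}

The transpose path, by contrast, carries no pragma of its own: GB_unop_transpose.c is included with GB_CAST_OP already defined, and the parallelism there is driven by the A_slice / naslice decomposition.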
owl_matrix_swap_impl_omp.h
/* * OWL - OCaml Scientific Computing * Copyright (c) 2016-2022 Liang Wang <liang@ocaml.xyz> */ #ifdef OWL_ENABLE_TEMPLATE // swap row i and row j in x(m,n) void FUNCTION (c, swap_rows) (TYPE *x, int m, int n, int i, int j) { if (i != j) { TYPE * src = x + n * i; TYPE * dst = x + n * j; if (n >= OWL_OMP_THRESHOLD_DEFAULT) { #pragma omp parallel for schedule(static) for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } else { for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } } } // stub function of swap_rows CAMLprim value FUNCTION (stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION (c, swap_rows) (X_data, m, n, i, j); return Val_unit; } // swap column i and column j in x(m,n) void FUNCTION (c, swap_cols) (TYPE *x, int m, int n, int i, int j) { if (i != j) { TYPE * src = x + i; TYPE * dst = x + j; if (m >= OWL_OMP_THRESHOLD_DEFAULT) { #pragma omp parallel for schedule(static) for (int k = 0; k < m; k++) { int base = k * n; TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; } } else { int base = 0; for (int k = 0; k < m; k++) { TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; base += n; } } } } // stub function of swap_cols CAMLprim value FUNCTION (stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION (c, swap_cols) (X_data, m, n, i, j); return Val_unit; } // transpose x(m,n) and save to y(n,m) void FUNCTION (c, transpose) (TYPE *x, TYPE *y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { #pragma omp parallel for schedule(static) for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = *(x + j + i * n); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = *(x + ofsx); ofsy += m; ofsx += 1; } } } } // stub function of transpose CAMLprim value FUNCTION (stub, transpose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION (c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } // conjugate transpose x(m,n) and save to y(n,m) void FUNCTION (c, ctranspose) (TYPE *x, TYPE *y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { #pragma omp parallel for schedule(static) for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = CONJ_FUN(*(x + j + i * n)); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = CONJ_FUN(*(x + ofsx)); ofsy += m; ofsx += 1; } } } } // stub function of ctranspose CAMLprim value FUNCTION (stub, ctranspose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION (c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } #endif /* OWL_ENABLE_TEMPLATE */
#ifdef OWL_ENABLE_TEMPLATE // swap row i and row j in x(m, n) void FUNCTION(c, swap_rows) (TYPE * x, int m, int n, int i, int j) { if (i != j) { TYPE *src = x + n * i; TYPE *dst = x + n * j; if (n >= OWL_OMP_THRESHOLD_DEFAULT) { for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } else { for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } } } //stub function of swap_rows CAMLprim value FUNCTION(stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION(c, swap_rows) (X_data, m, n, i, j); return Val_unit; } //swap column i and column j in x(m, n) void FUNCTION(c, swap_cols) (TYPE * x, int m, int n, int i, int j) { if (i != j) { TYPE *src = x + i; TYPE *dst = x + j; if (m >= OWL_OMP_THRESHOLD_DEFAULT) { for (int k = 0; k < m; k++) { int base = k * n; TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; } } else { int base = 0; for (int k = 0; k < m; k++) { TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; base += n; } } } } //stub function of swap_cols CAMLprim value FUNCTION(stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION(c, swap_cols) (X_data, m, n, i, j); return Val_unit; } //transpose x(m, n) and save to y(n, m) void FUNCTION(c, transpose) (TYPE * x, TYPE * y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = *(x + j + i * n); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = *(x + ofsx); ofsy += m; ofsx += 1; } } } } //stub function of transpose CAMLprim value FUNCTION(stub, transpose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION(c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } //conjugate transpose x(m, n) and save to y(n, m) void FUNCTION(c, ctranspose) (TYPE * x, TYPE * y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = CONJ_FUN(*(x + j + i * n)); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = CONJ_FUN(*(x + ofsx)); ofsy += m; ofsx += 1; } } } } //stub function of ctranspose CAMLprim value FUNCTION(stub, ctranspose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION(c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } #endif /* OWL_ENABLE_TEMPLATE */
#ifdef OWL_ENABLE_TEMPLATE // swap row i and row j in x(m, n) void FUNCTION(c, swap_rows) (TYPE * x, int m, int n, int i, int j) { if (i != j) { TYPE *src = x + n * i; TYPE *dst = x + n * j; if (n >= OWL_OMP_THRESHOLD_DEFAULT) { #pragma omp parallel for schedule(static) for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } else { for (int k = 0; k < n; k++) { TYPE t = *(src + k); *(src + k) = *(dst + k); *(dst + k) = t; } } } } //stub function of swap_rows CAMLprim value FUNCTION(stub, swap_rows) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION(c, swap_rows) (X_data, m, n, i, j); return Val_unit; } //swap column i and column j in x(m, n) void FUNCTION(c, swap_cols) (TYPE * x, int m, int n, int i, int j) { if (i != j) { TYPE *src = x + i; TYPE *dst = x + j; if (m >= OWL_OMP_THRESHOLD_DEFAULT) { #pragma omp parallel for schedule(static) for (int k = 0; k < m; k++) { int base = k * n; TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; } } else { int base = 0; for (int k = 0; k < m; k++) { TYPE t = *(src + base); *(src + base) = *(dst + base); *(dst + base) = t; base += n; } } } } //stub function of swap_cols CAMLprim value FUNCTION(stub, swap_cols) (value vX, value vM, value vN, value vI, value vJ) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; int m = Long_val(vM); int n = Long_val(vN); int i = Long_val(vI); int j = Long_val(vJ); FUNCTION(c, swap_cols) (X_data, m, n, i, j); return Val_unit; } //transpose x(m, n) and save to y(n, m) void FUNCTION(c, transpose) (TYPE * x, TYPE * y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { #pragma omp parallel for schedule(static) for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = *(x + j + i * n); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = *(x + ofsx); ofsy += m; ofsx += 1; } } } } //stub function of transpose CAMLprim value FUNCTION(stub, transpose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION(c, transpose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } //conjugate transpose x(m, n) and save to y(n, m) void FUNCTION(c, ctranspose) (TYPE * x, TYPE * y, int m, int n) { int ofsx = 0; int ofsy = 0; if (m >= OWL_OMP_THRESHOLD_DEFAULT / 100) { #pragma omp parallel for schedule(static) for (int i = 0; i < m; i++) { for (int j = 0; j < n; j++) { *(y + i + j * m) = CONJ_FUN(*(x + j + i * n)); } } } else { for (int i = 0; i < m; i++) { ofsy = i; for (int j = 0; j < n; j++) { *(y + ofsy) = CONJ_FUN(*(x + ofsx)); ofsy += m; ofsx += 1; } } } } //stub function of ctranspose CAMLprim value FUNCTION(stub, ctranspose) (value vX, value vY) { struct caml_ba_array *X = Caml_ba_array_val(vX); TYPE *X_data = (TYPE *) X->data; struct caml_ba_array *Y = Caml_ba_array_val(vY); TYPE *Y_data = (TYPE *) Y->data; FUNCTION(c, ctranspose) (X_data, Y_data, X->dim[0], X->dim[1]); return Val_unit; } #endif /* OWL_ENABLE_TEMPLATE */
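The pattern throughout this template is to duplicate each loop body and gate the #pragma omp parallel for copy behind a size threshold (OWL_OMP_THRESHOLD_DEFAULT, divided by 100 for the O(m*n) transposes), since forking a thread team costs more than swapping a short row serially. A monomorphic sketch of the same idea for double rows (the threshold constant here is illustrative):

#define SWAP_OMP_THRESHOLD 10000   /* illustrative cutover point */

void swap_rows_d(double *x, int n, int i, int j)
{
    if (i == j) return;
    double *src = x + (long)n * i;
    double *dst = x + (long)n * j;
    if (n >= SWAP_OMP_THRESHOLD) {
        /* long rows: amortize the fork/join over n iterations */
        #pragma omp parallel for schedule(static)
        for (int k = 0; k < n; k++) {
            double t = src[k]; src[k] = dst[k]; dst[k] = t;
        }
    } else {
        /* short rows: serial loop avoids parallel runtime overhead */
        for (int k = 0; k < n; k++) {
            double t = src[k]; src[k] = dst[k]; dst[k] = t;
        }
    }
}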
GB_binop__isne_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_int16 // A.*B function (eWiseMult): GB_AemultB__isne_int16 // A*D function (colscale): GB_AxD__isne_int16 // D*A function (rowscale): GB_DxB__isne_int16 // C+=B function (dense accum): GB_Cdense_accumB__isne_int16 // C+=b function (dense accum): GB_Cdense_accumb__isne_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int16 // C=scalar+B GB_bind1st__isne_int16 // C=scalar+B' GB_bind1st_tran__isne_int16 // C=A+scalar GB_bind2nd__isne_int16 // C=A'+scalar GB_bind2nd_tran__isne_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
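Every kernel in this file is stamped out from the single GB_BINOP definition; for ISNE on int16_t the operator is just z = (x != y), an ISO C boolean result stored as int16_t 0 or 1. A tiny sketch of the scalar semantics (the driver is illustrative; parentheses are added around the macro arguments for standalone use):

#include <stdio.h>
#include <stdint.h>

#define GB_BINOP(z, x, y) z = ((x) != (y)) ;   /* the ISNE_INT16 operator */

int main(void)
{
    int16_t Ax[4] = { 1, 2, 3, 4 };
    int16_t Bx[4] = { 1, 0, 3, 9 };
    int16_t Cx[4];
    for (int p = 0; p < 4; p++) {
        GB_BINOP(Cx[p], Ax[p], Bx[p]);   /* cij = (aij != bij) */
    }
    for (int p = 0; p < 4; p++) printf("%d ", Cx[p]);
    printf("\n");                        /* prints: 0 1 0 1 */
    return 0;
}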
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_int16 // A.*B function (eWiseMult): GB_AemultB__isne_int16 // A*D function (colscale): GB_AxD__isne_int16 // D*A function (rowscale): GB_DxB__isne_int16 // C+=B function (dense accum): GB_Cdense_accumB__isne_int16 // C+=b function (dense accum): GB_Cdense_accumb__isne_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int16 // C=scalar+B GB_bind1st__isne_int16 // C=scalar+B' GB_bind1st_tran__isne_int16 // C=A+scalar GB_bind2nd__isne_int16 // C=A'+scalar GB_bind2nd_tran__isne_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isne_int16 // A.*B function (eWiseMult): GB_AemultB__isne_int16 // A*D function (colscale): GB_AxD__isne_int16 // D*A function (rowscale): GB_DxB__isne_int16 // C+=B function (dense accum): GB_Cdense_accumB__isne_int16 // C+=b function (dense accum): GB_Cdense_accumb__isne_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isne_int16 // C=scalar+B GB_bind1st__isne_int16 // C=scalar+B' GB_bind1st_tran__isne_int16 // C=A+scalar GB_bind2nd__isne_int16 // C=A'+scalar GB_bind2nd_tran__isne_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x != y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT16 || GxB_NO_ISNE_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isne_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isne_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isne_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isne_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isne_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isne_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB_bind1st_tran__isne_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB_bind2nd_tran__isne_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
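The two copies of this generated kernel file differ only in whether the bind1st/bind2nd loops carry an OpenMP parallel-for. A minimal standalone sketch of that bind1st pattern, reduced from the generated code (the function name demo_bind1st_isne is hypothetical, not part of GraphBLAS): the scalar x is bound as the first operand and cij = (x != bij) is applied elementwise, with the pragma compiling away when OpenMP is disabled.

#include <stdint.h>

static void demo_bind1st_isne (int16_t *Cx, int16_t x,
                               const int16_t *Bx, int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x != Bx [p]) ;    /* ISNE yields 0 or 1 in the C type */
    }
}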
GB_Scalar_extractElement.c
//------------------------------------------------------------------------------ // GB_Scalar_extractElement_template: x = S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Extract the value of single scalar, x = S, typecasting from the // type of S to the type of x, as needed. // Returns GrB_SUCCESS if the GrB_Scalar entry is present, and sets x to its // value. Returns GrB_NO_VALUE if the GrB_Scalar is not present, and x is // unmodified. // This template constructs GrB_Scalar_extractElement_[TYPE] for each of the // 13 built-in types, and the _UDT method for all user-defined types. GrB_Info GB_EXTRACT_ELEMENT // extract a single entry from S ( GB_XTYPE *x, // scalar to extract, not modified if not found const GrB_Scalar S // GrB_Scalar to extract a scalar from ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL_OR_FAULTY (S) ; GB_RETURN_IF_NULL (x) ; // delete any lingering zombies, assemble any pending tuples, and unjumble if (GB_ANY_PENDING_WORK (S)) { // extract scalar with pending tuples or zombies. It cannot be // actually jumbled, but S->jumbled might true anyway. GrB_Info info ; GB_WHERE1 (GB_WHERE_STRING) ; GB_BURBLE_START ("GrB_Scalar_extractElement") ; GB_OK (GB_wait ((GrB_Matrix) S, "s", Context)) ; GB_BURBLE_END ; } ASSERT (!GB_ANY_PENDING_WORK (S)) ; // GB_XCODE and S must be compatible GB_Type_code scode = S->type->code ; if (!GB_code_compatible (GB_XCODE, scode)) { return (GrB_DOMAIN_MISMATCH) ; } if (GB_nnz ((GrB_Matrix) S) == 0 // empty || (S->p != NULL && S->p [1] == 0) // sparse/hyper with no entry || (S->b != NULL && S->b [0] == 0)) // bitmap with no entry { // quick return return (GrB_NO_VALUE) ; } //-------------------------------------------------------------------------- // extract the scalar //-------------------------------------------------------------------------- #if !defined ( GB_UDT_EXTRACT ) if (GB_XCODE == scode) { // copy S into x, no typecasting, for built-in types only. GB_XTYPE *restrict Sx = ((GB_XTYPE *) (S->x)) ; (*x) = Sx [0] ; } else #endif { // typecast S into x GB_cast_scalar (x, GB_XCODE, S->x, scode, S->type->size) ; } #pragma omp flush return (GrB_SUCCESS) ; } #undef GB_UDT_EXTRACT #undef GB_EXTRACT_ELEMENT #undef GB_XTYPE #undef GB_XCODE
//------------------------------------------------------------------------------ // GB_Scalar_extractElement_template: x = S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Extract the value of single scalar, x = S, typecasting from the // type of S to the type of x, as needed. // Returns GrB_SUCCESS if the GrB_Scalar entry is present, and sets x to its // value. Returns GrB_NO_VALUE if the GrB_Scalar is not present, and x is // unmodified. // This template constructs GrB_Scalar_extractElement_[TYPE] for each of the // 13 built-in types, and the _UDT method for all user-defined types. GrB_Info GB_EXTRACT_ELEMENT // extract a single entry from S ( GB_XTYPE *x, // scalar to extract, not modified if not found const GrB_Scalar S // GrB_Scalar to extract a scalar from ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL_OR_FAULTY (S) ; GB_RETURN_IF_NULL (x) ; // delete any lingering zombies, assemble any pending tuples, and unjumble if (GB_ANY_PENDING_WORK (S)) { // extract scalar with pending tuples or zombies. It cannot be // actually jumbled, but S->jumbled might true anyway. GrB_Info info ; GB_WHERE1 (GB_WHERE_STRING) ; GB_BURBLE_START ("GrB_Scalar_extractElement") ; GB_OK (GB_wait ((GrB_Matrix) S, "s", Context)) ; GB_BURBLE_END ; } ASSERT (!GB_ANY_PENDING_WORK (S)) ; // GB_XCODE and S must be compatible GB_Type_code scode = S->type->code ; if (!GB_code_compatible (GB_XCODE, scode)) { return (GrB_DOMAIN_MISMATCH) ; } if (GB_nnz ((GrB_Matrix) S) == 0 // empty || (S->p != NULL && S->p [1] == 0) // sparse/hyper with no entry || (S->b != NULL && S->b [0] == 0)) // bitmap with no entry { // quick return return (GrB_NO_VALUE) ; } //-------------------------------------------------------------------------- // extract the scalar //-------------------------------------------------------------------------- #if !defined ( GB_UDT_EXTRACT ) if (GB_XCODE == scode) { // copy S into x, no typecasting, for built-in types only. GB_XTYPE *restrict Sx = ((GB_XTYPE *) (S->x)) ; (*x) = Sx [0] ; } else #endif { // typecast S into x GB_cast_scalar (x, GB_XCODE, S->x, scode, S->type->size) ; } return (GrB_SUCCESS) ; } #undef GB_UDT_EXTRACT #undef GB_EXTRACT_ELEMENT #undef GB_XTYPE #undef GB_XCODE
//------------------------------------------------------------------------------ // GB_Scalar_extractElement_template: x = S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Extract the value of single scalar, x = S, typecasting from the // type of S to the type of x, as needed. // Returns GrB_SUCCESS if the GrB_Scalar entry is present, and sets x to its // value. Returns GrB_NO_VALUE if the GrB_Scalar is not present, and x is // unmodified. // This template constructs GrB_Scalar_extractElement_[TYPE] for each of the // 13 built-in types, and the _UDT method for all user-defined types. GrB_Info GB_EXTRACT_ELEMENT // extract a single entry from S ( GB_XTYPE *x, // scalar to extract, not modified if not found const GrB_Scalar S // GrB_Scalar to extract a scalar from ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_RETURN_IF_NULL_OR_FAULTY (S) ; GB_RETURN_IF_NULL (x) ; // delete any lingering zombies, assemble any pending tuples, and unjumble if (GB_ANY_PENDING_WORK (S)) { // extract scalar with pending tuples or zombies. It cannot be // actually jumbled, but S->jumbled might true anyway. GrB_Info info ; GB_WHERE1 (GB_WHERE_STRING) ; GB_BURBLE_START ("GrB_Scalar_extractElement") ; GB_OK (GB_wait ((GrB_Matrix) S, "s", Context)) ; GB_BURBLE_END ; } ASSERT (!GB_ANY_PENDING_WORK (S)) ; // GB_XCODE and S must be compatible GB_Type_code scode = S->type->code ; if (!GB_code_compatible (GB_XCODE, scode)) { return (GrB_DOMAIN_MISMATCH) ; } if (GB_nnz ((GrB_Matrix) S) == 0 // empty || (S->p != NULL && S->p [1] == 0) // sparse/hyper with no entry || (S->b != NULL && S->b [0] == 0)) // bitmap with no entry { // quick return return (GrB_NO_VALUE) ; } //-------------------------------------------------------------------------- // extract the scalar //-------------------------------------------------------------------------- #if !defined ( GB_UDT_EXTRACT ) if (GB_XCODE == scode) { // copy S into x, no typecasting, for built-in types only. GB_XTYPE *restrict Sx = ((GB_XTYPE *) (S->x)) ; (*x) = Sx [0] ; } else #endif { // typecast S into x GB_cast_scalar (x, GB_XCODE, S->x, scode, S->type->size) ; } #pragma omp flush return (GrB_SUCCESS) ; } #undef GB_UDT_EXTRACT #undef GB_EXTRACT_ELEMENT #undef GB_XTYPE #undef GB_XCODE
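The three variants of this template differ in one detail: the OpenMP builds keep a `#pragma omp flush` before the final return, while the no-OpenMP variant drops it. As a reminder of what flush means, here is a minimal producer/consumer sketch (hypothetical, not GraphBLAS code; flush-based signaling is subtle, and modern code usually prefers atomics): flush forces the listed shared variables into a consistent view of memory before and after the point where it appears.

#include <stdio.h>

int data = 0 ;
int ready = 0 ;

/* intended to run on one thread of a parallel region */
void producer (void)
{
    data = 42 ;
    #pragma omp flush (data, ready)   /* publish data before raising the flag */
    ready = 1 ;
    #pragma omp flush (ready)
}

/* intended to run on another thread of the same region */
void consumer (void)
{
    int seen = 0 ;
    while (!seen)
    {
        #pragma omp flush (ready)
        seen = ready ;
    }
    #pragma omp flush (data, ready)   /* data is now guaranteed visible */
    printf ("%d\n", data) ;           /* prints 42 */
}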
GB_unaryop__ainv_uint64_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_int8 // op(A') function: GB_tran__ainv_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_int8 // op(A') function: GB_tran__ainv_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint64_int8 // op(A') function: GB_tran__ainv_uint64_int8 // C type: uint64_t // A type: int8_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = -aij #define GB_ATYPE \ int8_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT64 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint64_int8 ( uint64_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint64_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
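Note the order of operations in GB_CAST_OP above: the int8_t value is first cast to uint64_t (sign-extended, then reinterpreted) and only then negated, so AINV wraps modulo 2^64. A small standalone check of that behavior (illustration only, not part of the library):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main (void)
{
    int8_t aij = -3 ;
    uint64_t x = (uint64_t) aij ;    /* conversion of -3 gives 2^64 - 3 */
    uint64_t cij = -x ;              /* unsigned negation wraps back to 3 */
    printf ("%" PRIu64 "\n", cij) ;  /* prints 3 */
    aij = 3 ;
    x = (uint64_t) aij ;
    cij = -x ;                       /* 2^64 - 3 */
    printf ("%" PRIu64 "\n", cij) ;  /* prints 18446744073709551613 */
    return 0 ;
}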
sigmoid_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: hhchen@openailab.com */ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b)) #define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b)) int ref_sigmoid_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread) { int dim_num = input_tensor->dim_num; if (dim_num == 4) { int batch = input_tensor->dims[0]; int channel = input_tensor->dims[1]; int cstep = input_tensor->dims[2] * input_tensor->dims[3]; int bstep = channel * cstep; for (int n=0; n<batch; n++) { #pragma omp parallel for num_threads(num_thread) for (int c=0; c<channel; c++) { float* input_data = (float*)input_tensor->data + n * bstep + c * cstep; float* output_data = (float*)output_tensor->data + n * bstep + c * cstep; for (int i=0; i<cstep; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } } } else { uint32_t elem_num = input_tensor->elem_num; float* input_data = input_tensor->data; float* output_data = output_tensor->data; for (int i = 0; i < elem_num; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } return 0; } int ref_sigmoid_uint8(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread) { /* dequant */ uint8_t* input_uint8 = input_tensor->data; uint8_t* output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; int input_size = input_tensor->elem_num; int output_size = output_tensor->elem_num; float* input_fp32 = ( float* )sys_malloc(input_size * sizeof(float)); float* output_fp32 = ( float* )sys_malloc(output_size * sizeof(float)); for (int i = 0; i < input_size; i++) { input_fp32[i] = (( float )input_uint8[i] - ( float )input_zero) * input_scale; } for (int i = 0; i < input_size; i++) { output_fp32[i] = SIGMOID_MIN(input_fp32[i], 30.0f); output_fp32[i] = SIGMOID_MAX(input_fp32[i], -30.0f); output_fp32[i] = 1 / (1 + exp(-output_fp32[i])); } /* quant */ for (int i = 0; i < output_size; i++) { int udata = round(output_fp32[i] / output_scale + output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(input_fp32); sys_free(output_fp32); return 0; } static int init_node(struct 
node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int reshape_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor; struct tensor* output_tensor; int ret = 0; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != output_tensor->dims[3]) ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num); return ret; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct node* ir_node = exec_node->ir_node; struct graph* ir_graph = ir_node->graph; struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_sigmoid_fp32(input_tensor, output_tensor, exec_graph->num_thread); else if(input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_sigmoid_uint8(input_tensor, output_tensor, exec_graph->num_thread); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node) { return OPS_SCORE_CANDO; } static struct node_ops sigmoid_node_ops = {.prerun = prerun, .run = run, .reshape = reshape_node, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_sigmoid_ref_op() { return register_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); } int unregister_sigmoid_ref_op() { return unregister_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); }
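One detail of the fp32 kernel above is worth noting: the SIGMOID_MAX call re-reads input_data[i], so the result of the preceding SIGMOID_MIN is overwritten and values above 30 reach expf unclamped. That happens to be harmless for the final value, since 1.f / (1 + expf(-x)) saturates to 1 anyway, but it is presumably not what the clamp intends. A minimal scalar sketch of a clamp that composes both bounds (hypothetical helper, not part of the Tengine source):

#include <math.h>

static inline float sigmoid_clamped(float v)
{
    v = fminf(v, 30.0f);            /* upper clamp */
    v = fmaxf(v, -30.0f);           /* lower clamp applied to the clamped value */
    return 1.f / (1.f + expf(-v));
}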
/* * Copyright (c) 2021, OPEN AI LAB Author: hhchen@openailab.com */ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b)) #define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b)) int ref_sigmoid_fp32(struct tensor *input_tensor, struct tensor *output_tensor, int num_thread) { int dim_num = input_tensor->dim_num; if (dim_num == 4) { int batch = input_tensor->dims[0]; int channel = input_tensor->dims[1]; int cstep = input_tensor->dims[2] * input_tensor->dims[3]; int bstep = channel * cstep; for (int n = 0; n < batch; n++) { for (int c = 0; c < channel; c++) { float *input_data = (float *)input_tensor->data + n * bstep + c * cstep; float *output_data = (float *)output_tensor->data + n * bstep + c * cstep; for (int i = 0; i < cstep; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } } } else { uint32_t elem_num = input_tensor->elem_num; float *input_data = input_tensor->data; float *output_data = output_tensor->data; for (int i = 0; i < elem_num; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } return 0; } int ref_sigmoid_uint8(struct tensor *input_tensor, struct tensor *output_tensor, int num_thread) { /* dequant */ uint8_t *input_uint8 = input_tensor->data; uint8_t *output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; int input_size = input_tensor->elem_num; int output_size = output_tensor->elem_num; float *input_fp32 = (float *)sys_malloc(input_size * sizeof(float)); float *output_fp32 = (float *)sys_malloc(output_size * sizeof(float)); for (int i = 0; i < input_size; i++) { input_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale; } for (int i = 0; i < input_size; i++) { output_fp32[i] = SIGMOID_MIN(input_fp32[i], 30.0f); output_fp32[i] = SIGMOID_MAX(input_fp32[i], -30.0f); output_fp32[i] = 1 / (1 + exp(-output_fp32[i])); } /* quant */ for (int i = 0; i < output_size; i++) { int udata = round(output_fp32[i] / output_scale + output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(input_fp32); sys_free(output_fp32); return 0; } static int init_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int release_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int reshape_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { struct node *ir_node = exec_node->ir_node; struct graph *ir_graph = ir_node->graph; struct tensor *input_tensor; struct tensor *output_tensor; int ret = 0; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != output_tensor->dims[3]) ret = 
set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num); return ret; } static int prerun(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int run(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { struct node *ir_node = exec_node->ir_node; struct graph *ir_graph = ir_node->graph; struct tensor *input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor *output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_sigmoid_fp32(input_tensor, output_tensor, exec_graph->num_thread); else if (input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_sigmoid_uint8(input_tensor, output_tensor, exec_graph->num_thread); return ret; } static int score(struct node_ops *node_ops, struct exec_graph *exec_graph, struct node *exec_node) { return OPS_SCORE_CANDO; } static struct node_ops sigmoid_node_ops = {.prerun = prerun, .run = run, .reshape = reshape_node, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_sigmoid_ref_op() { return register_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); } int unregister_sigmoid_ref_op() { return unregister_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); }
/* * Copyright (c) 2021, OPEN AI LAB Author: hhchen@openailab.com */ #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <math.h> #define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b)) #define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b)) int ref_sigmoid_fp32(struct tensor *input_tensor, struct tensor *output_tensor, int num_thread) { int dim_num = input_tensor->dim_num; if (dim_num == 4) { int batch = input_tensor->dims[0]; int channel = input_tensor->dims[1]; int cstep = input_tensor->dims[2] * input_tensor->dims[3]; int bstep = channel * cstep; for (int n = 0; n < batch; n++) { #pragma omp parallel for num_threads(num_thread) for (int c = 0; c < channel; c++) { float *input_data = (float *)input_tensor->data + n * bstep + c * cstep; float *output_data = (float *)output_tensor->data + n * bstep + c * cstep; for (int i = 0; i < cstep; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } } } else { uint32_t elem_num = input_tensor->elem_num; float *input_data = input_tensor->data; float *output_data = output_tensor->data; for (int i = 0; i < elem_num; i++) { output_data[i] = SIGMOID_MIN(input_data[i], 30.0f); output_data[i] = SIGMOID_MAX(input_data[i], -30.0f); output_data[i] = 1.f / (1 + expf(-output_data[i])); } } return 0; } int ref_sigmoid_uint8(struct tensor *input_tensor, struct tensor *output_tensor, int num_thread) { /* dequant */ uint8_t *input_uint8 = input_tensor->data; uint8_t *output_uint8 = output_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; int32_t input_zero = input_tensor->zero_point; int32_t output_zero = output_tensor->zero_point; int input_size = input_tensor->elem_num; int output_size = output_tensor->elem_num; float *input_fp32 = (float *)sys_malloc(input_size * sizeof(float)); float *output_fp32 = (float *)sys_malloc(output_size * sizeof(float)); for (int i = 0; i < input_size; i++) { input_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale; } for (int i = 0; i < input_size; i++) { output_fp32[i] = SIGMOID_MIN(input_fp32[i], 30.0f); output_fp32[i] = SIGMOID_MAX(input_fp32[i], -30.0f); output_fp32[i] = 1 / (1 + exp(-output_fp32[i])); } /* quant */ for (int i = 0; i < output_size; i++) { int udata = round(output_fp32[i] / output_scale + output_zero); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[i] = udata; } sys_free(input_fp32); sys_free(output_fp32); return 0; } static int init_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int release_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int reshape_node(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { struct node *ir_node = exec_node->ir_node; struct graph *ir_graph = ir_node->graph; struct tensor *input_tensor; struct tensor *output_tensor; int ret = 0; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); if (input_tensor->dims[1] != output_tensor->dims[1] || input_tensor->dims[2] != output_tensor->dims[2] || input_tensor->dims[3] != 
output_tensor->dims[3]) ret = set_ir_tensor_shape(output_tensor, input_tensor->dims, input_tensor->dim_num); return ret; } static int prerun(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { return 0; } static int run(struct node_ops *node_ops, struct exec_node *exec_node, struct exec_graph *exec_graph) { struct node *ir_node = exec_node->ir_node; struct graph *ir_graph = ir_node->graph; struct tensor *input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor *output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_sigmoid_fp32(input_tensor, output_tensor, exec_graph->num_thread); else if (input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_sigmoid_uint8(input_tensor, output_tensor, exec_graph->num_thread); return ret; } static int score(struct node_ops *node_ops, struct exec_graph *exec_graph, struct node *exec_node) { return OPS_SCORE_CANDO; } static struct node_ops sigmoid_node_ops = {.prerun = prerun, .run = run, .reshape = reshape_node, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; int register_sigmoid_ref_op() { return register_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); } int unregister_sigmoid_ref_op() { return unregister_builtin_node_ops(OP_SIGMOID, &sigmoid_node_ops); }
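The uint8 path above follows the usual affine quantization scheme: real = (q - zero_point) * scale on the way in, and q = round(real / scale + zero_point) saturated to [0, 255] on the way out. A minimal standalone sketch of that round trip (helper names hypothetical, mirroring the arithmetic in ref_sigmoid_uint8):

#include <math.h>
#include <stdint.h>

/* Dequantize a uint8 value to float under an affine scheme. */
static inline float dequant_u8(uint8_t q, float scale, int32_t zero_point)
{
    return ((float)q - (float)zero_point) * scale;
}

/* Quantize a float back to uint8, saturating to [0, 255]. */
static inline uint8_t quant_u8(float v, float scale, int32_t zero_point)
{
    int udata = (int)roundf(v / scale + (float)zero_point);
    if (udata > 255) udata = 255;
    else if (udata < 0) udata = 0;
    return (uint8_t)udata;
}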
hist_par.c
/* NAME: hist_par: create histograms in parallel Purpose: This program will fill an array with pseudo random values, build a histogram of that array, and then compute statistics. This can be used as a simple test of the quality of a random number generator Usage: To keep the program as simple as possible, you must edit the file and change basic parameters. Then compile and run the program. Algorithm: As a point of nomenclature, I like to think of a histogram as a sequence of buckets. I take each item from an array, figure out which bucket it belongs to, then increment the appropriate bucket counter. History: Written by Tim Mattson, 7/2017. updated with multiple methods 8/2021 */ #include <stdio.h> #include <omp.h> #include <math.h> #include "random.h" // uncomment this #define if you want tons of diagnostic output //#define DEBUG 0 #define num_trials 1000000 // number of x values #define num_buckets 50 // number of buckets in hitogram static long xlow = 0.0; // low end of x range static long xhi = 100.0; // High end of x range ///////////////////////////////////////////////////////////////////////// // Utility Functions ///////////////////////////////////////////////////////////////////////// int initHist(long* hist){ for(int i= 0; i< num_buckets; i++) hist[i] = 0; return 0; } int analyzeResults(double time,long *hist) { double sumh=0.0, sumhsq=0.0, ave, std_dev; // compute statistics ... ave, std-dev for whole histogram and quartiles for(int i=0;i<num_buckets;i++){ sumh += (double) hist[i]; sumhsq += (double) hist[i]*hist[i]; } ave = sumh/num_buckets; std_dev = sqrt(sumhsq - sumh*sumh/(double)num_buckets); printf(" histogram for %d buckets of %d values\n",num_buckets, num_trials); printf(" ave = %f, std_dev = %f\n",(float)ave, (float)std_dev); printf(" in %f seconds\n",(float)time); return 0; } ///////////////////////////////////////////////////////////////////////// int main () { double x[num_trials]; // array used to assign counters in the historgram int i; long hist[num_buckets]; // the histogram double bucket_width; // the width of each bucket in the histogram double time; omp_lock_t hist_lcks[num_buckets]; // array of locks, one per bucket #pragma omp parallel { #pragma omp single printf(" %d threads\n",omp_get_num_threads()); #pragma omp for for(i= 0; i< num_buckets; i++) omp_init_lock(&hist_lcks[i]); } seed(xlow, xhi); // seed the random number generator over range of x bucket_width = (xhi-xlow)/(double)num_buckets; // fill the array for(int i=0;i<num_trials;i++) x[i] = drandom(); //////////////////////////////////////////////////////////////// // Assign x values to the right historgram bucket -- sequential //////////////////////////////////////////////////////////////// printf(" Sequential "); initHist(hist); time = omp_get_wtime(); for(int i=0;i<num_trials;i++){ long ival = (long) (x[i] - xlow)/bucket_width; hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival); #endif } time = omp_get_wtime() - time; analyzeResults(time,hist); //////////////////////////////////////////////////////////////// // Assign x values to the right historgram bucket -- critical //////////////////////////////////////////////////////////////// printf(" par with critical "); initHist(hist); time = omp_get_wtime(); #pragma omp parallel for for(int i=0;i<num_trials;i++){ long ival = (long) (x[i] - xlow)/bucket_width; #pragma omp critical hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival); #endif } time = omp_get_wtime() - time; 
analyzeResults(time,hist); //////////////////////////////////////////////////////////////// // Assign x values to the right historgram bucket -- par with locks //////////////////////////////////////////////////////////////// printf(" par with locks "); initHist(hist); time = omp_get_wtime(); #pragma omp parallel for for(int i=0;i<num_trials;i++){ long ival = (long) (x[i] - xlow)/bucket_width; omp_set_lock(&hist_lcks[ival]); // protect the histogram bucket. Should hist[ival]++; // have little overhead since the locks omp_unset_lock(&hist_lcks[ival]); // are mostly uncontended #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival); #endif } time = omp_get_wtime() - time; analyzeResults(time,hist); //////////////////////////////////////////////////////////////// // Assign x values to the right historgram bucket -- par reduction //////////////////////////////////////////////////////////////// printf(" par with reduction "); initHist(hist); time = omp_get_wtime(); #pragma omp parallel for reduction(+:hist[0:num_buckets]) for(int i=0;i<num_trials;i++){ long ival = (long) (x[i] - xlow)/bucket_width; hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n",i,(float)x[i], ival); #endif } time = omp_get_wtime() - time; analyzeResults(time,hist); return 0; }
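One subtlety in the bucket computation above: in `long ival = (long) (x[i] - xlow)/bucket_width;` the cast binds tighter than the division, so the difference is truncated to long before dividing. With xlow = 0 and the integral bucket width used here the index happens to come out the same, but the grouping is fragile (a non-integral bucket_width such as 2.5 could misplace values near bucket boundaries). A sketch of the explicit grouping (hypothetical helper, not in the original):

/* Compute the bucket index with the cast applied to the whole
   quotient, not just the numerator. */
static long bucket_of(double x, double xlow, double bucket_width)
{
    return (long)((x - xlow) / bucket_width);
}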
/* * * * NAME: hist_par: create histograms in parallel * * Purpose: This program will fill an array with pseudo random values, build a * histogram of that array, and then compute statistics. This can be used * as a simple test of the quality of a random number generator * * Usage: To keep the program as simple as possible, you must edit the file * change basic parameters. Then compile and run the program. * * Algorithm: As a point of nomenclature, I like to think of a histogram as a * sequence of buckets. I take each item from an array, figure out which * bucket it belongs to, then increment the appropriate bucket counter. * * History: Written by Tim Mattson, 7/2017. updated with multiple methods * 8/2021 * */ #include <stdio.h> #include <omp.h> #include <math.h> #include "random.h" //uncomment this #define if you want tons of diagnostic output // #define DEBUG 0 #define num_trials 1000000 // number of x values #define num_buckets 50 // number of buckets in hitogram static long xlow = 0.0; //low end of x range static long xhi = 100.0; //High end of x range ///////////////////////////////////////////////////////////////////////// //Utility Functions ///////////////////////////////////////////////////////////////////////// int initHist(long *hist) { for (int i = 0; i < num_buckets; i++) hist[i] = 0; return 0; } int analyzeResults(double time, long *hist) { double sumh = 0.0, sumhsq = 0.0, ave, std_dev; //compute statistics...ave, std - dev for whole histogram and quartiles for (int i = 0; i < num_buckets; i++) { sumh += (double)hist[i]; sumhsq += (double)hist[i] * hist[i]; } ave = sumh / num_buckets; std_dev = sqrt(sumhsq - sumh * sumh / (double)num_buckets); printf(" histogram for %d buckets of %d values\n", num_buckets, num_trials); printf(" ave = %f, std_dev = %f\n", (float)ave, (float)std_dev); printf(" in %f seconds\n", (float)time); return 0; } ///////////////////////////////////////////////////////////////////////// int main() { double x[num_trials]; //array used to assign counters in the historgram int i; long hist[num_buckets]; //the histogram double bucket_width; //the width of each bucket in the histogram double time; omp_lock_t hist_lcks[num_buckets]; //array of locks, one per bucket printf(" %d threads\n", omp_get_num_threads()); for (i = 0; i < num_buckets; i++) omp_init_lock(&hist_lcks[i]); seed(xlow, xhi); //seed the random number generator over range of x bucket_width = (xhi - xlow) / (double)num_buckets; //fill the array for (int i = 0; i < num_trials; i++) x[i] = drandom(); //////////////////////////////////////////////////////////////// //Assign x values to the right historgram bucket-- sequential //////////////////////////////////////////////////////////////// printf(" Sequential "); initHist(hist); time = omp_get_wtime(); for (int i = 0; i < num_trials; i++) { long ival = (long)(x[i] - xlow) / bucket_width; hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n", i, (float)x[i], ival); #endif /* */ } time = omp_get_wtime() - time; analyzeResults(time, hist); //////////////////////////////////////////////////////////////// //Assign x values to the right historgram bucket-- critical //////////////////////////////////////////////////////////////// printf(" par with critical "); initHist(hist); time = omp_get_wtime(); for (int i = 0; i < num_trials; i++) { long ival = (long)(x[i] - xlow) / bucket_width; hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n", i, (float)x[i], ival); #endif /* */ } time = omp_get_wtime() - time; analyzeResults(time, hist); 
//////////////////////////////////////////////////////////////// //Assign x values to the right historgram bucket-- par with locks //////////////////////////////////////////////////////////////// printf(" par with locks "); initHist(hist); time = omp_get_wtime(); for (int i = 0; i < num_trials; i++) { long ival = (long)(x[i] - xlow) / bucket_width; omp_set_lock(&hist_lcks[ival]); //protect the histogram bucket.Should hist[ival]++; //have little overhead since the locks omp_unset_lock(&hist_lcks[ival]); //are mostly uncontended #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n", i, (float)x[i], ival); #endif /* */ } time = omp_get_wtime() - time; analyzeResults(time, hist); //////////////////////////////////////////////////////////////// //Assign x values to the right historgram bucket-- par reduction //////////////////////////////////////////////////////////////// printf(" par with reduction "); initHist(hist); time = omp_get_wtime(); for (int i = 0; i < num_trials; i++) { long ival = (long)(x[i] - xlow) / bucket_width; hist[ival]++; #ifdef DEBUG printf("i = %d, xi = %f, ival = %d\n", i, (float)x[i], ival); #endif /* */ } time = omp_get_wtime() - time; analyzeResults(time, hist); return 0; }
/*
 * NAME: hist_par: create histograms in parallel
 *
 * Purpose: This program will fill an array with pseudo random values, build a
 * histogram of that array, and then compute statistics. This can be used
 * as a simple test of the quality of a random number generator.
 *
 * Usage: To keep the program as simple as possible, you must edit the file
 * to change basic parameters. Then compile and run the program.
 *
 * Algorithm: As a point of nomenclature, I like to think of a histogram as a
 * sequence of buckets. I take each item from an array, figure out which
 * bucket it belongs to, then increment the appropriate bucket counter.
 *
 * History: Written by Tim Mattson, 7/2017. Updated with multiple methods,
 * 8/2021.
 */
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "random.h"

// uncomment this #define if you want tons of diagnostic output
// #define DEBUG 0

#define num_trials 1000000 // number of x values
#define num_buckets 50     // number of buckets in histogram
static double xlow = 0.0;  // low end of x range
static double xhi = 100.0; // high end of x range

/////////////////////////////////////////////////////////////////////////
// Utility Functions
/////////////////////////////////////////////////////////////////////////
int initHist(long *hist) {
    for (int i = 0; i < num_buckets; i++)
        hist[i] = 0;
    return 0;
}

int analyzeResults(double time, long *hist) {
    double sumh = 0.0, sumhsq = 0.0, ave, std_dev;

    // compute statistics ... ave, std_dev for whole histogram and quartiles
    for (int i = 0; i < num_buckets; i++) {
        sumh += (double)hist[i];
        sumhsq += (double)hist[i] * hist[i];
    }

    ave = sumh / num_buckets;
    std_dev = sqrt(sumhsq - sumh * sumh / (double)num_buckets);

    printf(" histogram for %d buckets of %d values\n", num_buckets, num_trials);
    printf(" ave = %f, std_dev = %f\n", (float)ave, (float)std_dev);
    printf(" in %f seconds\n", (float)time);
    return 0;
}

/////////////////////////////////////////////////////////////////////////
int main() {
    double x[num_trials];   // array used to assign counters in the histogram
    int i;
    long hist[num_buckets]; // the histogram
    double bucket_width;    // the width of each bucket in the histogram
    double time;

    omp_lock_t hist_lcks[num_buckets]; // array of locks, one per bucket

#pragma omp parallel
    {
#pragma omp single
        printf(" %d threads\n", omp_get_num_threads());

#pragma omp for
        for (i = 0; i < num_buckets; i++)
            omp_init_lock(&hist_lcks[i]);
    }

    seed(xlow, xhi); // seed the random number generator over range of x
    bucket_width = (xhi - xlow) / (double)num_buckets;

    // fill the array
    for (int i = 0; i < num_trials; i++)
        x[i] = drandom();

    ////////////////////////////////////////////////////////////////
    // Assign x values to the right histogram bucket -- sequential
    ////////////////////////////////////////////////////////////////
    printf(" Sequential ");
    initHist(hist);
    time = omp_get_wtime();

    for (int i = 0; i < num_trials; i++) {
        long ival = (long)((x[i] - xlow) / bucket_width);
        hist[ival]++;
#ifdef DEBUG
        printf("i = %d, xi = %f, ival = %ld\n", i, (float)x[i], ival);
#endif
    }
    time = omp_get_wtime() - time;
    analyzeResults(time, hist);

    ////////////////////////////////////////////////////////////////
    // Assign x values to the right histogram bucket -- critical
    ////////////////////////////////////////////////////////////////
    printf(" par with critical ");
    initHist(hist);
    time = omp_get_wtime();

#pragma omp parallel for
    for (int i = 0; i < num_trials; i++) {
        long ival = (long)((x[i] - xlow) / bucket_width);
#pragma omp critical
        hist[ival]++;
#ifdef DEBUG
        printf("i = %d, xi = %f, ival = %ld\n", i, (float)x[i], ival);
#endif
    }
    time = omp_get_wtime() - time;
    analyzeResults(time, hist);

    ////////////////////////////////////////////////////////////////
    // Assign x values to the right histogram bucket -- par with locks
    ////////////////////////////////////////////////////////////////
    printf(" par with locks ");
    initHist(hist);
    time = omp_get_wtime();

#pragma omp parallel for
    for (int i = 0; i < num_trials; i++) {
        long ival = (long)((x[i] - xlow) / bucket_width);
        omp_set_lock(&hist_lcks[ival]);   // protect the histogram bucket. Should
        hist[ival]++;                     // have little overhead since the locks
        omp_unset_lock(&hist_lcks[ival]); // are mostly uncontended
#ifdef DEBUG
        printf("i = %d, xi = %f, ival = %ld\n", i, (float)x[i], ival);
#endif
    }
    time = omp_get_wtime() - time;
    analyzeResults(time, hist);

    ////////////////////////////////////////////////////////////////
    // Assign x values to the right histogram bucket -- par reduction
    ////////////////////////////////////////////////////////////////
    printf(" par with reduction ");
    initHist(hist);
    time = omp_get_wtime();

#pragma omp parallel for reduction(+:hist[0:num_buckets])
    for (int i = 0; i < num_trials; i++) {
        long ival = (long)((x[i] - xlow) / bucket_width);
        hist[ival]++;
#ifdef DEBUG
        printf("i = %d, xi = %f, ival = %ld\n", i, (float)x[i], ival);
#endif
    }
    time = omp_get_wtime() - time;
    analyzeResults(time, hist);

    return 0;
}
eaw-experimental.c
#include "eaw-experimental.h" #include "libdwt.h" #include "inline.h" #include <assert.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <limits.h> #ifdef _OPENMP #include <omp.h> #endif /** * @brief Copy memory area. * * This function copies @p n floats from memory area @p src to memory area * @p dst. Memory areas can be sparse. The strides (in bytes) are determined by * @p stride_dst and @p stride_src arguments. * * @returns The function returns a pointer to @p dst. */ static void *dwt_util_memcpy_stride_s( void *restrict dst, ssize_t stride_dst, const void *restrict src, ssize_t stride_src, size_t n ///< Number of floats to be copied, not number of bytes. ) { assert( NULL != dst && NULL != src ); const size_t size = sizeof(float); if( (ssize_t)size == stride_src && (ssize_t)size == stride_dst ) { memcpy(dst, src, n*size); } else { char *restrict ptr_dst = (char *restrict)dst; const char *restrict ptr_src = (const char *restrict)src; for(size_t i = 0; i < n; i++) { *(float *restrict)ptr_dst = *(const float *restrict)ptr_src; ptr_dst += stride_dst; ptr_src += stride_src; } } return dst; } static float dwt_eaw_w(float n, float m, float alpha) { const float eps = 1.0e-5f; return 1.f / (powf(fabsf(n-m), alpha) + eps); } static void dwt_calc_eaw_w(float *w, float *arr, int N, float alpha) { for(int i = 0; i < N-1; i++) { w[i] = dwt_eaw_w(arr[i], arr[i+1], alpha); } w[N-1] = 0.f; // not necessary } void dwt_eaw97_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride, float *w, float alpha ) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf97_s1_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); dwt_calc_eaw_w(w, tmp, N, alpha); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_p1_s); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } // predict 2 + update 2 for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_p2_s); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; // copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), 
floor_div2(N)); } void dwt_eaw97_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride, float *w ) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf97_s2_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp+0, 2*sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp+1, 2*sizeof(float), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; // backward update 2 + backward predict 2 for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u2_s); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p2_s); } for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_p2_s); } // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_u1_s); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR) * (2.f*dwt_cdf97_p1_s); } for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR) * (2.f*dwt_cdf97_p1_s); } // copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_eaw97_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding, float *wH[], float *wV[], float alpha ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; // dwt_util_log(LOG_DBG, "FWD-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j, j); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); wH[j] = dwt_util_alloc(size_o_src_y * size_i_src_x, sizeof(float)); wV[j] = dwt_util_alloc(size_o_src_x * size_i_src_y, sizeof(float)); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), 
addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, // N stride_y, &wH[j][y*size_i_src_x], alpha ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, // N stride_x, &wV[j][x*size_i_src_y], alpha ); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_eaw97_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, float *wH[], float *wV[] ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; // dwt_util_log(LOG_DBG, "INV-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j-1, j-1); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, // N stride_x, &wV[j-1][x*size_i_dst_y] ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, // N stride_y, &wH[j-1][y*size_i_dst_x] ); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } }
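Two details in the pragmas above are worth flagging. First, private(temp) privatizes a variable-length array, so every thread carries its own full-row copy on its stack. Second, the schedule chunk expression calls omp_get_num_threads(), which returns 1 outside a parallel region; depending on where the implementation evaluates that expression, the chunk size may not be what the author intended. A more conventional way to give each worker its own scratch row is to allocate it inside the parallel region. This is only a sketch; row_transform() is a hypothetical stand-in for a call like dwt_eaw97_f_ex_stride_s():

#include <stdlib.h>

void process_rows(float *data, int rows, int row_len)
{
#pragma omp parallel
	{
		/* per-thread scratch buffer instead of a privatized VLA */
		float *temp = malloc((size_t)row_len * sizeof *temp);
		if (!temp)
			abort(); /* keep it simple: every thread must reach the for */

#pragma omp for schedule(static)
		for (int y = 0; y < rows; y++) {
			/* row_transform(&data[(size_t)y * row_len], temp, row_len); // hypothetical */
			(void)data; (void)temp;
		}

		free(temp);
	}
}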
#include "eaw-experimental.h" #include "libdwt.h" #include "inline.h" #include <assert.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <limits.h> /** * @brief Copy memory area. * * This function copies @p n floats from memory area @p src to memory area * @p dst. Memory areas can be sparse. The strides (in bytes) are determined by * @p stride_dst and @p stride_src arguments. * * @returns The function returns a pointer to @p dst. */ static void * dwt_util_memcpy_stride_s( void *restrict dst, ssize_t stride_dst, const void *restrict src, ssize_t stride_src, size_t n /// <Number of floats to be copied, not number of bytes. ) { assert(NULL != dst && NULL != src); const size_t size = sizeof(float); if ((ssize_t) size == stride_src && (ssize_t) size == stride_dst) { memcpy(dst, src, n * size); } else { char *restrict ptr_dst = (char *restrict)dst; const char *restrict ptr_src = (const char *restrict)src; for (size_t i = 0; i < n; i++) { *(float *restrict)ptr_dst = *(const float *restrict)ptr_src; ptr_dst += stride_dst; ptr_src += stride_src; } } return dst; } static float dwt_eaw_w(float n, float m, float alpha) { const float eps = 1.0e-5 f; return 1. f / (powf(fabsf(n - m), alpha) + eps); } static void dwt_calc_eaw_w(float *w, float *arr, int N, float alpha) { for (int i = 0; i < N - 1; i++) { w[i] = dwt_eaw_w(arr[i], arr[i + 1], alpha); } w[N - 1] = 0. f; //not necessary } void dwt_eaw97_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride, float *w, float alpha ) { assert(N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride); //fix for small N if (N < 2) { if (1 == N) dst_l[0] = src[0] * dwt_cdf97_s1_s; return; } //copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); dwt_calc_eaw_w(w, tmp, N, alpha); //predict 1 + update 1 for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } //predict 2 + update 2 for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. 
f * dwt_cdf97_u2_s); } //scale for (int i = 0; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; for (int i = 1; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; //copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp + 0, 2 * sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp + 1, 2 * sizeof(float), floor_div2(N)); } void dwt_eaw97_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride, float *w ) { assert(N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride); //fix for small N if (N < 2) { if (1 == N) dst[0] = src_l[0] * dwt_cdf97_s2_s; return; } //copy src into tmp dwt_util_memcpy_stride_s(tmp + 0, 2 * sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp + 1, 2 * sizeof(float), src_h, stride, floor_div2(N)); //inverse scale for (int i = 0; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; for (int i = 1; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; //backward update 2 + backward predict 2 for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } //backward update 1 + backward predict 1 for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } //copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_eaw97_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding, float *wH[], float *wV[], float alpha ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); float temp[size_o_big_max]; if (NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one ? 
size_o_big_max : size_o_big_min); if (*j_max_ptr < 0 || *j_max_ptr > j_limit) *j_max_ptr = j_limit; for (;;) { if (*j_max_ptr == j) break; //dwt_util_log(LOG_DBG, "FWD-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j, j); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j + 1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j + 1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j); wH[j] = dwt_util_alloc(size_o_src_y * size_i_src_x, sizeof(float)); wV[j] = dwt_util_alloc(size_o_src_x * size_i_src_y, sizeof(float)); for (int y = 0; y < size_o_src_y; y++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_dst_x, stride_x, stride_y), temp, size_i_src_x, //N stride_y, &wH[j][y * size_i_src_x], alpha ); for (int x = 0; x < size_o_src_x; x++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_dst_y, x, stride_x, stride_y), temp, size_i_src_y, //N stride_x, &wV[j][x * size_i_src_y], alpha ); if (zero_padding) { for (int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_dst_x, stride_x, stride_y), size_i_src_x, size_o_dst_x, size_o_src_x - size_o_dst_x, stride_y); for (int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_dst_y, x, stride_x, stride_y), size_i_src_y, size_o_dst_y, size_o_src_y - size_o_dst_y, stride_x); } j++; } } void dwt_eaw97_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, float *wH[], float *wV[] ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); float temp[size_o_big_max]; if (NULL == temp) abort(); int j = ceil_log2(decompose_one ? size_o_big_max : size_o_big_min); if (j_max >= 0 && j_max < j) j = j_max; for (;;) { if (0 == j) break; //dwt_util_log(LOG_DBG, "INV-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j - 1, j - 1); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j - 1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j - 1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j - 1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j - 1); for (int x = 0; x < size_o_dst_x; x++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_src_y, x, stride_x, stride_y), addr2_s(ptr, 0, x, stride_x, stride_y), temp, size_i_dst_y, //N stride_x, &wV[j - 1][x * size_i_dst_y] ); for (int y = 0; y < size_o_dst_y; y++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_src_x, stride_x, stride_y), addr2_s(ptr, y, 0, stride_x, stride_y), temp, size_i_dst_x, //N stride_y, &wH[j - 1][y * size_i_dst_x] ); if (zero_padding) { for (int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), size_i_dst_x, size_o_dst_x, stride_y); for (int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } }
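The strided copy at the top of this file is the workhorse that splits tmp[] into the low- and high-pass subbands: counts are given in floats, strides in bytes. A tiny standalone illustration of that convention (the static function itself is private to the file, so the stepping is written out by hand here):

#include <stdio.h>
#include <string.h>

int main(void)
{
	float src[8] = {0, 1, 2, 3, 4, 5, 6, 7};
	float even[4];

	/* gather every second float into a contiguous buffer: byte stride
	   of 2*sizeof(float) on the source, sizeof(float) on the target */
	const char *p = (const char *)src;
	for (size_t i = 0; i < 4; i++) {
		memcpy(&even[i], p, sizeof(float));
		p += 2 * sizeof(float);
	}

	for (int i = 0; i < 4; i++)
		printf("%g ", even[i]); /* prints: 0 2 4 6 */
	printf("\n");
	return 0;
}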
#include "eaw-experimental.h" #include "libdwt.h" #include "inline.h" #include <assert.h> #include <string.h> #include <math.h> #include <stdlib.h> #include <limits.h> #ifdef _OPENMP #include <omp.h> #endif /** * @brief Copy memory area. * * This function copies @p n floats from memory area @p src to memory area * @p dst. Memory areas can be sparse. The strides (in bytes) are determined by * @p stride_dst and @p stride_src arguments. * * @returns The function returns a pointer to @p dst. */ static void * dwt_util_memcpy_stride_s( void *restrict dst, ssize_t stride_dst, const void *restrict src, ssize_t stride_src, size_t n /// <Number of floats to be copied, not number of bytes. ) { assert(NULL != dst && NULL != src); const size_t size = sizeof(float); if ((ssize_t) size == stride_src && (ssize_t) size == stride_dst) { memcpy(dst, src, n * size); } else { char *restrict ptr_dst = (char *restrict)dst; const char *restrict ptr_src = (const char *restrict)src; for (size_t i = 0; i < n; i++) { *(float *restrict)ptr_dst = *(const float *restrict)ptr_src; ptr_dst += stride_dst; ptr_src += stride_src; } } return dst; } static float dwt_eaw_w(float n, float m, float alpha) { const float eps = 1.0e-5 f; return 1. f / (powf(fabsf(n - m), alpha) + eps); } static void dwt_calc_eaw_w(float *w, float *arr, int N, float alpha) { for (int i = 0; i < N - 1; i++) { w[i] = dwt_eaw_w(arr[i], arr[i + 1], alpha); } w[N - 1] = 0. f; //not necessary } void dwt_eaw97_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride, float *w, float alpha ) { assert(N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride); //fix for small N if (N < 2) { if (1 == N) dst_l[0] = src[0] * dwt_cdf97_s1_s; return; } //copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); dwt_calc_eaw_w(w, tmp, N, alpha); //predict 1 + update 1 for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } //predict 2 + update 2 for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. 
f * dwt_cdf97_u2_s); } //scale for (int i = 0; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; for (int i = 1; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; //copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp + 0, 2 * sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp + 1, 2 * sizeof(float), floor_div2(N)); } void dwt_eaw97_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride, float *w ) { assert(N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride); //fix for small N if (N < 2) { if (1 == N) dst[0] = src_l[0] * dwt_cdf97_s2_s; return; } //copy src into tmp dwt_util_memcpy_stride_s(tmp + 0, 2 * sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp + 1, 2 * sizeof(float), src_h, stride, floor_div2(N)); //inverse scale for (int i = 0; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s2_s; for (int i = 1; i < N; i += 2) tmp[i] = tmp[i] * dwt_cdf97_s1_s; //backward update 2 + backward predict 2 for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u2_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p2_s); } //backward update 1 + backward predict 1 for (int i = 2; i < N - (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] -= (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } if (is_odd(N)) { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] -= (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_u1_s); } else { float wL = w[N - 2]; float wR = w[N - 2]; tmp[N - 1] += (wL * tmp[N - 2] + wR * tmp[N - 2]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } for (int i = 1; i < N - 2 + (N & 1); i += 2) { float wL = w[i - 1]; float wR = w[i + 0]; tmp[i] += (wL * tmp[i - 1] + wR * tmp[i + 1]) / (wL + wR) * (2. f * dwt_cdf97_p1_s); } //copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_eaw97_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding, float *wH[], float *wV[], float alpha ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); float temp[size_o_big_max]; if (NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one ? 
size_o_big_max : size_o_big_min); if (*j_max_ptr < 0 || *j_max_ptr > j_limit) *j_max_ptr = j_limit; for (;;) { if (*j_max_ptr == j) break; //dwt_util_log(LOG_DBG, "FWD-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j, j); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j + 1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j + 1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j); wH[j] = dwt_util_alloc(size_o_src_y * size_i_src_x, sizeof(float)); wV[j] = dwt_util_alloc(size_o_src_x * size_i_src_y, sizeof(float)); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for (int y = 0; y < size_o_src_y; y++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_dst_x, stride_x, stride_y), temp, size_i_src_x, //N stride_y, &wH[j][y * size_i_src_x], alpha ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for (int x = 0; x < size_o_src_x; x++) dwt_eaw97_f_ex_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_dst_y, x, stride_x, stride_y), temp, size_i_src_y, //N stride_x, &wV[j][x * size_i_src_y], alpha ); if (zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for (int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_dst_x, stride_x, stride_y), size_i_src_x, size_o_dst_x, size_o_src_x - size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for (int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_dst_y, x, stride_x, stride_y), size_i_src_y, size_o_dst_y, size_o_src_y - size_o_dst_y, stride_x); } j++; } } void dwt_eaw97_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, float *wH[], float *wV[] ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); float temp[size_o_big_max]; if (NULL == temp) abort(); int j = ceil_log2(decompose_one ? 
size_o_big_max : size_o_big_min); if (j_max >= 0 && j_max < j) j = j_max; for (;;) { if (0 == j) break; //dwt_util_log(LOG_DBG, "INV-EAW-5/3: j = %i with wH[%i] wV[%i]\n", j, j - 1, j - 1); const int size_o_src_x = ceil_div_pow2(size_o_big_x, j); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j - 1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j - 1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j - 1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j - 1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for (int x = 0; x < size_o_dst_x; x++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), addr2_s(ptr, size_o_src_y, x, stride_x, stride_y), addr2_s(ptr, 0, x, stride_x, stride_y), temp, size_i_dst_y, //N stride_x, &wV[j - 1][x * size_i_dst_y] ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for (int y = 0; y < size_o_dst_y; y++) dwt_eaw97_i_ex_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), addr2_s(ptr, y, size_o_src_x, stride_x, stride_y), addr2_s(ptr, y, 0, stride_x, stride_y), temp, size_i_dst_x, //N stride_y, &wH[j - 1][y * size_i_dst_x] ); if (zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for (int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr, y, 0, stride_x, stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for (int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr, 0, x, stride_x, stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } }
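The "edge-avoiding" behaviour of this transform comes entirely from the weight formula w = 1/(|n - m|^alpha + eps): neighbouring samples that straddle a large jump get a tiny weight, so the lifting steps average mostly within smooth regions. A standalone demo of just that formula (re-implemented locally because dwt_eaw_w() is static to its translation unit; the alpha value is an assumption for illustration):

#include <stdio.h>
#include <math.h>

static float eaw_weight(float n, float m, float alpha)
{
	const float eps = 1.0e-5f;
	return 1.f / (powf(fabsf(n - m), alpha) + eps);
}

int main(void)
{
	/* smooth run, then an "edge" between index 2 and 3 */
	float signal[] = {1.0f, 1.1f, 1.2f, 9.0f, 9.1f};
	float alpha = 0.8f; /* illustrative exponent, not from the source */

	for (int i = 0; i < 4; i++)
		printf("w[%d] = %g\n", i, eaw_weight(signal[i], signal[i + 1], alpha));
	/* the pair crossing the edge gets a much smaller weight */
	return 0;
}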
GB_unaryop__identity_int64_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_int32 // op(A') function: GB_tran__identity_int64_int32 // C type: int64_t // A type: int32_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_int32 ( int64_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_int32 // op(A') function: GB_tran__identity_int64_int32 // C type: int64_t // A type: int32_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_int32 ( int64_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_int64_int32 // op(A') function: GB_tran__identity_int64_int32 // C type: int64_t // A type: int32_t // cast: int64_t cij = (int64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ int64_t z = (int64_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_int64_int32 ( int64_t *Cx, // Cx and Ax may be aliased int32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_int64_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
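Unfolding the GB_* macro layer makes the generated kernel easier to read: for this (identity, int64 from int32) instantiation, the apply path is nothing more than a flat, embarrassingly parallel typecast loop. A standalone sketch of that expansion (not GraphBLAS source; the aliasing note from the original is dropped since a widening cast cannot usefully alias anyway):

#include <stdint.h>
#include <stdio.h>

static void unop_identity_int64_int32(int64_t *Cx, const int32_t *Ax,
                                      int64_t anz, int nthreads)
{
    int64_t p ;
    /* same shape as the generated loop: one cast per entry */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (int64_t) Ax [p] ;   /* GB_CASTING + GB_OP (identity) */
    }
}

int main(void)
{
    int32_t a [4] = { -1, 2, -3, 4 } ;
    int64_t c [4] ;
    unop_identity_int64_int32 (c, a, 4, 2) ;
    for (int i = 0 ; i < 4 ; i++) printf ("%lld\n", (long long) c [i]) ;
    return (0) ;
}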
syr2k.c
/** * syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "SYR2K" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.10 /* Problem size */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 2048 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define N SIZE #define M SIZE /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] = ((DATA_TYPE)i * j + 2) / N; } for (j = 0; j < M; j++) { A[i * N + j] = ((DATA_TYPE)i * j) / N; B[i * N + j] = ((DATA_TYPE)i * j + 1) / N; } } } void syr2k(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } void syr2k_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *Cinit) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Cinit[i * N + j] *= BETA; } } #pragma omp target teams map(to : A[ : N *M], B[ : N *M], Cinit[ : N *N]) map(from : C[ : N *N]) device(DEVICE_ID) #pragma omp distribute parallel for collapse(2) for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { C[i * N + j] = Cinit[i * N + j]; for (int k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } int compareResults(DATA_TYPE *C, DATA_TYPE *C_Gpu) { int i, j, fail; fail = 0; // Compare C with D for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (percentDiff(C[i * N + j], C_Gpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *Cinit; DATA_TYPE *C_Gpu; A = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); Cinit = (DATA_TYPE *)malloc(N * M * sizeof(DATA_TYPE)); C_Gpu = (DATA_TYPE *)calloc(N * M, sizeof(DATA_TYPE)); //fprintf(stdout, "<< Symmetric rank-2k operations size: %d>>\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_arrays(A, B, Cinit); t_start = rtclock(); syr2k_OMP(A, B, C_Gpu, Cinit); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST init_arrays(A, B, C); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(C, C_Gpu); #endif free(A); 
free(B); free(C); free(Cinit); free(C_Gpu); return fail; }
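compareResults() above leans on percentDiff() from BenchmarksUtil.h, whose definition is not shown in this file. PolyBench/GPU-style harnesses typically compute a relative difference in percent with a guard against near-zero reference values; the following is an assumed shape for readers, not the actual library source:

#include <math.h>

/* Assumed sketch of percentDiff(): relative difference in percent,
   with both-near-zero treated as a match and a small bias term so a
   zero reference value does not divide by zero. Illustration only. */
static float percent_diff_sketch(float a, float b)
{
    if (fabsf(a) < 0.01f && fabsf(b) < 0.01f)
        return 0.0f; /* both effectively zero */
    return 100.0f * fabsf(a - b) / fabsf(a + 1e-8f);
}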
/** * syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #include "BenchmarksUtil.h" #define BENCHMARK_NAME "SYR2K" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.10 /* Problem size */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 2048 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define N SIZE #define M SIZE /* * Declared constant values for ALPHA and BETA (same as values in * PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] = ((DATA_TYPE) i * j + 2) / N; } for (j = 0; j < M; j++) { A[i * N + j] = ((DATA_TYPE) i * j) / N; B[i * N + j] = ((DATA_TYPE) i * j + 1) / N; } } } void syr2k(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } void syr2k_OMP(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C, DATA_TYPE * Cinit) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Cinit[i * N + j] *= BETA; } } for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { C[i * N + j] = Cinit[i * N + j]; for (int k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } int compareResults(DATA_TYPE * C, DATA_TYPE * C_Gpu) { int i, j, fail; fail = 0; //Compare C with D for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (percentDiff(C[i * N + j], C_Gpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } //print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *Cinit; DATA_TYPE *C_Gpu; A = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); B = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); C = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); Cinit = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); C_Gpu = (DATA_TYPE *) calloc(N * M, sizeof(DATA_TYPE)); //fprintf(stdout, "<< Symmetric rank-2k operations size: %d>>\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_arrays(A, B, Cinit); t_start = rtclock(); syr2k_OMP(A, B, C_Gpu, Cinit); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST init_arrays(A, B, C); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(C, C_Gpu); #endif free(A); free(B); free(C); free(C_Gpu); return fail; }
/** * syr2k.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" #define BENCHMARK_NAME "SYR2K" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.10 /* Problem size */ #ifdef RUN_POLYBENCH_SIZE #define SIZE 2048 #elif RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define N SIZE #define M SIZE /* * Declared constant values for ALPHA and BETA (same as values in * PolyBench 2.0) */ #define ALPHA 12435 #define BETA 4546 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C) { int i, j; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] = ((DATA_TYPE) i * j + 2) / N; } for (j = 0; j < M; j++) { A[i * N + j] = ((DATA_TYPE) i * j) / N; B[i * N + j] = ((DATA_TYPE) i * j + 1) / N; } } } void syr2k(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C) { int i, j, k; for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { C[i * N + j] *= BETA; } } for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { for (k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } void syr2k_OMP(DATA_TYPE * A, DATA_TYPE * B, DATA_TYPE * C, DATA_TYPE * Cinit) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { Cinit[i * N + j] *= BETA; } } #pragma omp target teams map(to : A[ : N *M], B[ : N *M], Cinit[ : N *N]) map(from : C[ : N *N]) device(DEVICE_ID) #pragma omp distribute parallel for collapse(2) for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { C[i * N + j] = Cinit[i * N + j]; for (int k = 0; k < M; k++) { C[i * N + j] += ALPHA * A[i * M + k] * B[j * M + k]; C[i * N + j] += ALPHA * B[i * M + k] * A[j * M + k]; } } } } int compareResults(DATA_TYPE * C, DATA_TYPE * C_Gpu) { int i, j, fail; fail = 0; //Compare C with D for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { if (percentDiff(C[i * N + j], C_Gpu[i * N + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } //print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } int main() { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *Cinit; DATA_TYPE *C_Gpu; A = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); B = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); C = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); Cinit = (DATA_TYPE *) malloc(N * M * sizeof(DATA_TYPE)); C_Gpu = (DATA_TYPE *) calloc(N * M, sizeof(DATA_TYPE)); //fprintf(stdout, "<< Symmetric rank-2k operations size: %d>>\n", SIZE); printBenchmarkInfo(BENCHMARK_NAME, SIZE); init_arrays(A, B, Cinit); t_start = rtclock(); syr2k_OMP(A, B, C_Gpu, Cinit); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST init_arrays(A, B, C); t_start = rtclock(); syr2k(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(C, 
C_Gpu); #endif free(A); free(B); free(C); free(Cinit); free(C_Gpu); return fail; }
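Note the division of labour in syr2k_OMP: the BETA scaling of C is done on the host into Cinit, so the device kernel only reads mapped inputs (A, B, Cinit) and writes C, making the offloaded loop a pure function of its map(to:) data. The same map/teams/distribute pattern in miniature, written as a single combined construct (the device(DEVICE_ID) clause from the benchmark harness is omitted; without an offload-capable compiler the region simply runs on the host):

#include <stdio.h>
#define DIM 256

int main(void) {
  static float a[DIM * DIM], c[DIM * DIM];
  for (int i = 0; i < DIM * DIM; i++)
    a[i] = (float)(i % 7);

  /* inputs mapped 'to' the device, the result mapped 'from' it;
     collapse(2) spreads the i/j iteration space across teams/threads */
#pragma omp target teams distribute parallel for collapse(2)                   \
    map(to : a[0 : DIM * DIM]) map(from : c[0 : DIM * DIM])
  for (int i = 0; i < DIM; i++)
    for (int j = 0; j < DIM; j++)
      c[i * DIM + j] = 2.0f * a[i * DIM + j];

  printf("c[0] = %f\n", c[0]);
  return 0;
}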
tree-pretty-print.c
/* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "diagnostic.h" #include "real.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "fixed-value.h" #include "value-prof.h" /* Local functions, macros and variables. */ static int op_prio (const_tree); static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void print_call_name (pretty_printer *, const_tree); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_declaration (pretty_printer *, tree, int, int); static void print_struct_decl (pretty_printer *, const_tree, int, int); static void do_niy (pretty_printer *, const_tree); static void dump_vops (pretty_printer *, tree, int, int); static void dump_generic_bb_buff (pretty_printer *, basic_block, int, int); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) #define PRINT_FUNCTION_NAME(NODE) pp_printf \ (buffer, "%s", TREE_CODE (NODE) == NOP_EXPR ? \ lang_hooks.decl_printable_name (TREE_OPERAND (NODE, 0), 1) : \ lang_hooks.decl_printable_name (NODE, 1)) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. */ static void do_niy (pretty_printer *buffer, const_tree node) { int i, len; pp_string (buffer, "<<< Unknown tree: "); pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]); if (EXPR_P (node)) { len = TREE_OPERAND_LENGTH (node); for (i = 0; i < len; ++i) { newline_and_indent (buffer, 2); dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false); } } pp_string (buffer, " >>>\n"); } /* Debugging function to print out a generic expression. */ void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a generic statement. */ void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a chain of trees . */ void debug_tree_chain (tree t) { while (t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf(stderr, " "); t = TREE_CHAIN (t); } fprintf (stderr, "\n"); } /* Prints declaration DECL to the FILE with details specified by FLAGS. 
*/ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. The output is indented by INDENT spaces. */ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { tree t = node; if (DECL_NAME (t)) pp_tree_identifier (buffer, DECL_NAME (t)); if ((flags & TDF_UID) || DECL_NAME (t) == NULL_TREE) { if (TREE_CODE (t) == LABEL_DECL && LABEL_DECL_UID (t) != -1) pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (t)); else { char c = TREE_CODE (t) == CONST_DECL ? 'C' : 'D'; pp_printf (buffer, "%c.%u", c, DECL_UID (t)); } } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name (pretty_printer *buffer, tree node) { if (DECL_NAME (node)) PRINT_FUNCTION_NAME (node); else dump_decl_name (buffer, node, 0); } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_function_declaration (pretty_printer *buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space (buffer); pp_character (buffer, '('); /* Print the argument types. The last element in the list is a VOID_TYPE. The following avoids printing the last element. */ arg = TYPE_ARG_TYPES (node); while (arg && TREE_CHAIN (arg) && arg != error_mark_node) { wrote_arg = true; dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false); arg = TREE_CHAIN (arg); if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } if (!wrote_arg) pp_string (buffer, "void"); pp_character (buffer, ')'); } /* Dump the domain associated with an array. */ static void dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) { pp_character (buffer, '['); if (domain) { tree min = TYPE_MIN_VALUE (domain); tree max = TYPE_MAX_VALUE (domain); if (min && max && integer_zerop (min) && host_integerp (max, 0)) pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); else { if (min) dump_generic_node (buffer, min, spc, flags, false); pp_character (buffer, ':'); if (max) dump_generic_node (buffer, max, spc, flags, false); } } else pp_string (buffer, "<unknown>"); pp_character (buffer, ']'); } /* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in dump_generic_node. 
*/ static void dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags) { const char *name; switch (OMP_CLAUSE_CODE (clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; print_remap: pp_string (buffer, name); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_REDUCTION: pp_string (buffer, "reduction("); pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause))); pp_character (buffer, ':'); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_IF: pp_string (buffer, "if("); dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NUM_THREADS: pp_string (buffer, "num_threads("); dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NOWAIT: pp_string (buffer, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string (buffer, "ordered"); break; case OMP_CLAUSE_DEFAULT: pp_string (buffer, "default("); switch (OMP_CLAUSE_DEFAULT_KIND (clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string (buffer, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string (buffer, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string (buffer, "private"); break; default: gcc_unreachable (); } pp_character (buffer, ')'); break; case OMP_CLAUSE_SCHEDULE: pp_string (buffer, "schedule("); switch (OMP_CLAUSE_SCHEDULE_KIND (clause)) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string (buffer, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string (buffer, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string (buffer, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string (buffer, "runtime"); break; default: gcc_unreachable (); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause)) { pp_character (buffer, ','); dump_generic_node (buffer, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_character (buffer, ')'); break; default: /* Should never happen. */ dump_generic_node (buffer, clause, spc, flags, false); break; } } /* Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags) { if (clause == NULL) return; pp_space (buffer); while (1) { dump_omp_clause (buffer, clause, spc, flags); clause = OMP_CLAUSE_CHAIN (clause); if (clause == NULL) return; pp_space (buffer); } } /* Dump the set of decls SYMS. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_symbols (pretty_printer *buffer, bitmap syms, int flags) { unsigned i; bitmap_iterator bi; if (syms == NULL) pp_string (buffer, "NIL"); else { pp_string (buffer, " { "); EXECUTE_IF_SET_IN_BITMAP (syms, 0, i, bi) { tree sym = referenced_var_lookup (i); dump_generic_node (buffer, sym, 0, flags, false); pp_string (buffer, " "); } pp_string (buffer, "}"); } } /* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent. 
FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node) || GIMPLE_STMT_P (node); /* We use has_stmt_ann because CALL_EXPR can be both an expression and a statement, and we have no guarantee that it will have a stmt_ann when it is used as an RHS expression. stmt_ann will assert if you call it on something with a non-stmt annotation attached. */ if (TREE_CODE (node) != ERROR_MARK && is_gimple_stmt (node) && (flags & (TDF_VOPS|TDF_MEMSYMS)) && has_stmt_ann (node) && TREE_CODE (node) != PHI_NODE) dump_vops (buffer, node, spc, flags); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) { expanded_location xloc = expand_location (EXPR_LOCATION (node)); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_BINFO: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); break; case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); } dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class class; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); class = TREE_CODE_CLASS (TREE_CODE (node)); if (class == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (class == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { pp_string (buffer, (TYPE_UNSIGNED (node) ?
"<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case METHOD_TYPE: dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); pp_string (buffer, "::"); break; case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (buffer, "MEM["); tmp = TMR_SYMBOL (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_BASE (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); if (flags & TDF_DETAILS) { pp_string (buffer, "{"); dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags, false); pp_string (buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else print_struct_decl (buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. 
The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); high = ~high + !low; low = -low; } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf (pp_buffer (buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, high, low); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (buffer, string); break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, "<L%d>", (int) LABEL_DECL_UID (node)); else pp_printf (buffer, "<D.%u>", DECL_UID (node)); break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. 
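(The four are the implicitly declared default constructor, copy constructor, destructor and copy assignment operator, which is why TYPE_METHODS is checked rather than looking for user-written members.)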
*/ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case SYMBOL_MEMORY_TAG: case NAME_MEMORY_TAG: case STRUCT_FIELD_TAG: case VAR_DECL: case PARM_DECL: case FIELD_DECL: case NAMESPACE_DECL: case MEMORY_PARTITION_TAG: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (op0) != VALUE_HANDLE) { op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character (buffer, '{'); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_string (buffer, "="); } if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND 
(node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: dump_generic_node (buffer, GENERIC_TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (TREE_CODE (node) == GIMPLE_MODIFY_STMT && MOVE_NONTEMPORAL (node)) pp_string (buffer, "{nt}"); if (TREE_CODE (node) == GIMPLE_MODIFY_STMT) { stmt_ann_t ann; if ((ann = stmt_ann (node)) && ann->has_volatile_ops) pp_string (buffer, "{v}"); } pp_space (buffer); dump_generic_node (buffer, GENERIC_TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. 
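It is printed as an indented "else { ... }" block, mirroring the then-arm emitted above.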
*/ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, node); /* Print parameters. */ pp_space (buffer); pp_character (buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (buffer, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_character (buffer, ','); pp_space (buffer); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } pp_character (buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case STATIC_CHAIN_EXPR: pp_string (buffer, "<<static chain of "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. 
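These all print in infix form; an operand is parenthesized whenever its op_prio value does not exceed that of NODE, so the dump preserves the grouping of the tree.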
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
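The address of a string literal or a function therefore prints as "abc" or foo rather than &"abc" or &foo.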
*/ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF) { pp_string (buffer, "{misalignment: "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case NOP_EXPR: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case 
TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CHANGE_DYNAMIC_TYPE_EXPR: pp_string (buffer, "<<<change_dynamic_type ("); dump_generic_node (buffer, CHANGE_DYNAMIC_TYPE_NEW_TYPE (node), spc + 2, flags, false); pp_string (buffer, ") "); dump_generic_node (buffer, CHANGE_DYNAMIC_TYPE_LOCATION (node), spc + 2, flags, false); pp_string (buffer, ")>>>"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
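Front ends lower loop exits to labels literally named "break" and "continue"; the GOTO_EXPR case below prints jumps to such labels as the bare keywords.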
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case EXC_PTR_EXPR: pp_string (buffer, "<<<exception object>>>"); break; case FILTER_EXPR: pp_string (buffer, "<<<filter object>>>"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR || TREE_CODE (op0) == GIMPLE_MODIFY_STMT) dump_generic_node (buffer, GENERIC_TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case RESX_EXPR: pp_string (buffer, "resx "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default "); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case PHI_NODE: { int i; dump_generic_node (buffer, PHI_RESULT (node), spc, flags, false); pp_string (buffer, " = PHI <"); for (i = 0; i < PHI_NUM_ARGS (node); i++) { dump_generic_node (buffer, PHI_ARG_DEF (node, i), spc, flags, false); pp_string (buffer, "("); pp_decimal_int (buffer, PHI_ARG_EDGE (node, i)->src->index); pp_string (buffer, ")"); if (i < PHI_NUM_ARGS (node) - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if (stmt_references_memory_p (node) && (flags & TDF_MEMSYMS)) dump_symbols (buffer, STORED_SYMS (node), flags); } break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case VALUE_HANDLE: pp_printf (buffer, "VH.%d", VALUE_HANDLE_ID (node)); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); 
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); if (OMP_PARALLEL_FN (node)) { pp_string (buffer, " [child fn: "); dump_generic_node (buffer, OMP_PARALLEL_FN (node), spc, flags, false); pp_string (buffer, " ("); if (OMP_PARALLEL_DATA_ARG (node)) dump_generic_node (buffer, OMP_PARALLEL_DATA_ARG (node), spc, flags, false); else pp_string (buffer, "???"); pp_string (buffer, ")]"); } dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, OMP_FOR_INIT (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_COND (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_INCR (node), spc, flags, false); pp_string (buffer, ")"); if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); if (OMP_SECTIONS_CONTROL (node)) { pp_string (buffer, " <"); dump_generic_node (buffer, OMP_SECTIONS_CONTROL (node), spc, flags, false); pp_string (buffer, ">"); } dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTIONS_SWITCH: pp_string (buffer, "OMP_SECTIONS_SWITCH"); is_expr = false; break; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character (buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_ATOMIC_LOAD: pp_string (buffer, "#pragma omp atomic_load"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); pp_character (buffer, '*'); dump_generic_node (buffer, TREE_OPERAND (node, 1), 
spc, flags, false); break; case OMP_ATOMIC_STORE: pp_string (buffer, "#pragma omp atomic_store ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case OMP_SINGLE: pp_string (buffer, "#pragma omp single"); dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_RETURN: pp_string (buffer, "OMP_RETURN"); if (OMP_RETURN_NOWAIT (node)) pp_string (buffer, " [nowait]"); is_expr = false; break; case OMP_CONTINUE: pp_string (buffer, "OMP_CONTINUE <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " <- "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); is_expr = false; break; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string (buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: { tree t; pp_string (buffer, "BLOCK"); if (BLOCK_ABSTRACT (node)) 
pp_string (buffer, " [abstract]"); if (TREE_ASM_WRITTEN (node)) pp_string (buffer, " [written]"); newline_and_indent (buffer, spc + 2); if (BLOCK_SUPERCONTEXT (node)) { pp_string (buffer, "SUPERCONTEXT: "); if (TREE_CODE (BLOCK_SUPERCONTEXT (node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_SUPERCONTEXT (node)); else dump_generic_node (buffer, BLOCK_SUPERCONTEXT (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_SUBBLOCKS (node)) { pp_string (buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (node); t; t = BLOCK_CHAIN (t)) pp_printf (buffer, "%p ", (void *)t); newline_and_indent (buffer, spc + 2); } if (BLOCK_VARS (node)) { pp_string (buffer, "VARS: "); for (t = BLOCK_VARS (node); t; t = TREE_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (node)) { pp_string (buffer, "ABSTRACT_ORIGIN: "); if (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_ABSTRACT_ORIGIN (node)); else dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } } break; case VEC_EXTRACT_EVEN_EXPR: pp_string (buffer, " VEC_EXTRACT_EVEN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_EXTRACT_ODD_EXPR: pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_HIGH_EXPR: pp_string (buffer, " VEC_INTERLEAVE_HIGH_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_LOW_EXPR: pp_string (buffer, " VEC_INTERLEAVE_LOW_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (buffer); /* If we're building a diagnostic, the formatted text will be written into BUFFER's stream by the caller; otherwise, write it now. */ if (!(flags & TDF_DIAGNOSTIC)) pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ static void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. 
*/ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. */ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) { pp_string (buffer, " __asm__ "); pp_character (buffer, '('); dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false); pp_character (buffer, ')'); } /* The initial value of a function serves to determine whether the function is declared or defined. So the following does not apply to function nodes. */ if (TREE_CODE (t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL (t)) { pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false); } } if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t)) { pp_string (buffer, " [value-expr: "); dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false); pp_character (buffer, ']'); } pp_character (buffer, ';'); } /* Prints a structure: name, fields, and methods. FIXME: Still incomplete. */ static void print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME (node)) { INDENT (spc); if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if ((TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE)) pp_string (buffer, "union "); dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline (buffer); INDENT (spc); pp_character (buffer, '{'); pp_newline (buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS (node); while (tmp) { /* Avoid printing the structure recursively. */ /* FIXME: Not implemented correctly... What about the case when we have a cycle in the containment graph? Maybe this could be solved by looking at the scope in which the structure was declared. */ if (TREE_TYPE (tmp) != node || (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE && TREE_TYPE (TREE_TYPE (tmp)) != node)) { print_declaration (buffer, tmp, spc+2, flags); pp_newline (buffer); } tmp = TREE_CHAIN (tmp); } } INDENT (spc); pp_character (buffer, '}'); } /* Return the priority of the operator OP. From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . Unary +, - and * have higher precedence than the corresponding binary operators. 
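For example, a MULT_EXPR (priority 13) with a PLUS_EXPR operand (priority 12) is dumped as "(a + b) * c", whereas a PLUS_EXPR with a MULT_EXPR operand needs no parentheses and is dumped as "a * b + c".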
*/ static int op_prio (const_tree op) { if (op == NULL) return 9999; switch (TREE_CODE (op)) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: return 12; case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case VEC_UNPACK_HI_EXPR: case VEC_UNPACK_LO_EXPR: case VEC_UNPACK_FLOAT_HI_EXPR: case VEC_UNPACK_FLOAT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: return 16; case SAVE_EXPR: case NON_LVALUE_EXPR: return op_prio (TREE_OPERAND (op, 0)); default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the symbol associated with operator CODE. 
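For example, TRUNC_DIV_EXPR yields "/", while the rounding variants are annotated: CEIL_DIV_EXPR yields "/[cl]" and FLOOR_MOD_EXPR yields "%[fl]".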
*/ const char * op_symbol_code (enum tree_code code) { switch (code) { case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case POINTER_PLUS_EXPR: return "+"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case ALIGN_INDIRECT_REF: return "A*"; case MISALIGNED_INDIRECT_REF: return "M*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol (const_tree op) { return op_symbol_code (TREE_CODE (op)); } /* Prints the name of a CALL_EXPR. */ static void print_call_name (pretty_printer *buffer, const_tree node) { tree op0; gcc_assert (TREE_CODE (node) == CALL_EXPR); op0 = CALL_EXPR_FN (node); if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: dump_function_name (buffer, op0); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); break; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); pp_string (buffer, ") ? "); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, 0, false); pp_string (buffer, " : "); dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, 0, false); break; case COMPONENT_REF: /* The function is a pointer contained in a structure. */ if (TREE_CODE (TREE_OPERAND (op0, 0)) == INDIRECT_REF || TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 1)); else dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); /* else We can have several levels of structures and a function pointer inside. This is not implemented yet... 
*/ /* NIY;*/ break; case ARRAY_REF: if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 0)); else dump_generic_node (buffer, op0, 0, 0, false); break; case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node (buffer, op0, 0, 0, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... */ static void pretty_print_string (pretty_printer *buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string (buffer, "\\b"); break; case '\f': pp_string (buffer, "\\f"); break; case '\n': pp_string (buffer, "\\n"); break; case '\r': pp_string (buffer, "\\r"); break; case '\t': pp_string (buffer, "\\t"); break; case '\v': pp_string (buffer, "\\v"); break; case '\\': pp_string (buffer, "\\\\"); break; case '\"': pp_string (buffer, "\\\""); break; case '\'': pp_string (buffer, "\\'"); break; /* No need to handle \0; the loop terminates on \0. */ case '\1': pp_string (buffer, "\\1"); break; case '\2': pp_string (buffer, "\\2"); break; case '\3': pp_string (buffer, "\\3"); break; case '\4': pp_string (buffer, "\\4"); break; case '\5': pp_string (buffer, "\\5"); break; case '\6': pp_string (buffer, "\\6"); break; case '\7': pp_string (buffer, "\\7"); break; default: pp_character (buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, /* prefix */NULL, /* line-width */0); pp_needs_newline (&buffer) = true; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } static void dump_vops (pretty_printer *buffer, tree stmt, int spc, int flags) { struct voptype_d *vdefs; struct voptype_d *vuses; int i, n; if (!ssa_operands_active () || !stmt_references_memory_p (stmt)) return; /* Even if the statement doesn't have virtual operators yet, it may contain symbol information (this happens before aliases have been computed). */ if ((flags & TDF_MEMSYMS) && VUSE_OPS (stmt) == NULL && VDEF_OPS (stmt) == NULL) { if (LOADED_SYMS (stmt)) { pp_string (buffer, "# LOADS: "); dump_symbols (buffer, LOADED_SYMS (stmt), flags); newline_and_indent (buffer, spc); } if (STORED_SYMS (stmt)) { pp_string (buffer, "# STORES: "); dump_symbols (buffer, STORED_SYMS (stmt), flags); newline_and_indent (buffer, spc); } return; } vuses = VUSE_OPS (stmt); while (vuses) { pp_string (buffer, "# VUSE <"); n = VUSE_NUM (vuses); for (i = 0; i < n; i++) { dump_generic_node (buffer, VUSE_OP (vuses, i), spc + 2, flags, false); if (i < n - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if (flags & TDF_MEMSYMS) dump_symbols (buffer, LOADED_SYMS (stmt), flags); newline_and_indent (buffer, spc); vuses = vuses->next; } vdefs = VDEF_OPS (stmt); while (vdefs) { pp_string (buffer, "# "); dump_generic_node (buffer, VDEF_RESULT (vdefs), spc + 2, flags, false); pp_string (buffer, " = VDEF <"); n = VDEF_NUM (vdefs); for (i = 0; i < n; i++) { dump_generic_node (buffer, VDEF_OP (vdefs, i), spc + 2, flags, 0); if (i < n - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if ((flags & TDF_MEMSYMS) && vdefs->next == NULL) dump_symbols (buffer, STORED_SYMS (stmt), flags); newline_and_indent (buffer, spc); vdefs = vdefs->next; } } /* Dumps basic block BB to FILE with details described by FLAGS and indented by INDENT spaces. 
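With TDF_BLOCKS set, the dump carries the "# BLOCK", "# PRED" and "# SUCC" annotations; otherwise the block is introduced only by a "<bb N>:" label.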
*/ void dump_generic_bb (FILE *file, basic_block bb, int indent, int flags) { maybe_init_pretty_print (file); dump_generic_bb_buff (&buffer, bb, indent, flags); pp_flush (&buffer); } /* Dumps header of basic block BB to buffer BUFFER indented by INDENT spaces and details described by flags. */ static void dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; edge_iterator ei; if (flags & TDF_BLOCKS) { INDENT (indent); pp_string (buffer, "# BLOCK "); pp_decimal_int (buffer, bb->index); if (bb->frequency) { pp_string (buffer, " freq:"); pp_decimal_int (buffer, bb->frequency); } if (bb->count) { pp_string (buffer, " count:"); pp_widest_integer (buffer, bb->count); } if (flags & TDF_LINENO) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) if (get_lineno (bsi_stmt (bsi)) != -1) { pp_string (buffer, ", starting at line "); pp_decimal_int (buffer, get_lineno (bsi_stmt (bsi))); break; } } newline_and_indent (buffer, indent); pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); FOR_EACH_EDGE (e, ei, bb->preds) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->src == ENTRY_BLOCK_PTR) pp_string (buffer, "ENTRY"); else pp_decimal_int (buffer, e->src->index); } else dump_edge_info (buffer->buffer->stream, e, 0); pp_newline (buffer); } else { stmt = first_stmt (bb); if (!stmt || TREE_CODE (stmt) != LABEL_EXPR) { INDENT (indent - 2); pp_string (buffer, "<bb "); pp_decimal_int (buffer, bb->index); pp_string (buffer, ">:"); pp_newline (buffer); } } pp_write_text_to_stream (buffer); check_bb_profile (bb, buffer->buffer->stream); } /* Dumps end of basic block BB to buffer BUFFER indented by INDENT spaces. */ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); FOR_EACH_EDGE (e, ei, bb->succs) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->dest == EXIT_BLOCK_PTR) pp_string (buffer, "EXIT"); else pp_decimal_int (buffer, e->dest->index); } else dump_edge_info (buffer->buffer->stream, e, 1); pp_newline (buffer); } /* Dump PHI nodes of basic block BB to BUFFER with details described by FLAGS and indented by INDENT spaces. */ static void dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags) { tree phi = phi_nodes (bb); if (!phi) return; for (; phi; phi = PHI_CHAIN (phi)) { if (is_gimple_reg (PHI_RESULT (phi)) || (flags & TDF_VOPS)) { INDENT (indent); pp_string (buffer, "# "); dump_generic_node (buffer, phi, indent, flags, false); pp_newline (buffer); } } } /* Dump jump to basic block BB that is represented implicitly in the cfg to BUFFER. */ static void pp_cfg_jump (pretty_printer *buffer, basic_block bb) { tree stmt; stmt = first_stmt (bb); pp_string (buffer, "goto <bb "); pp_decimal_int (buffer, bb->index); pp_string (buffer, ">"); if (stmt && TREE_CODE (stmt) == LABEL_EXPR) { pp_string (buffer, " ("); dump_generic_node (buffer, LABEL_EXPR_LABEL (stmt), 0, 0, false); pp_string (buffer, ")"); } pp_semicolon (buffer); } /* Dump edges represented implicitly in basic block BB to BUFFER, indented by INDENT spaces, with details given by FLAGS. 
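For a block ending in a COND_EXPR both destinations are printed as explicit gotos; otherwise an artificial goto is emitted only for a fallthru edge whose target is not the next basic block.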
*/ static void dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; tree stmt; stmt = last_stmt (bb); if (stmt && TREE_CODE (stmt) == COND_EXPR) { edge true_edge, false_edge; /* When we are emitting the code or changing CFG, it is possible that the edges are not yet created. When we are using debug_bb in such a situation, we do not want it to crash. */ if (EDGE_COUNT (bb->succs) != 2) return; extract_true_false_edges_from_block (bb, &true_edge, &false_edge); INDENT (indent + 2); pp_cfg_jump (buffer, true_edge->dest); newline_and_indent (buffer, indent); pp_string (buffer, "else"); newline_and_indent (buffer, indent + 2); pp_cfg_jump (buffer, false_edge->dest); pp_newline (buffer); return; } /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; if (e && e->dest != bb->next_bb) { INDENT (indent); if ((flags & TDF_LINENO) #ifdef USE_MAPPED_LOCATION && e->goto_locus != UNKNOWN_LOCATION #else && e->goto_locus #endif ) { expanded_location goto_xloc; #ifdef USE_MAPPED_LOCATION goto_xloc = expand_location (e->goto_locus); #else goto_xloc = *e->goto_locus; #endif pp_character (buffer, '['); if (goto_xloc.file) { pp_string (buffer, goto_xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, goto_xloc.line); pp_string (buffer, "] "); } pp_cfg_jump (buffer, e->dest); pp_newline (buffer); } } /* Dumps basic block BB to buffer BUFFER with details described by FLAGS and indented by INDENT spaces. */ static void dump_generic_bb_buff (pretty_printer *buffer, basic_block bb, int indent, int flags) { block_stmt_iterator bsi; tree stmt; int label_indent = indent - 2; if (label_indent < 0) label_indent = 0; dump_bb_header (buffer, bb, indent, flags); dump_phi_nodes (buffer, bb, indent, flags); for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { int curr_indent; stmt = bsi_stmt (bsi); curr_indent = TREE_CODE (stmt) == LABEL_EXPR ? label_indent : indent; INDENT (curr_indent); dump_generic_node (buffer, stmt, curr_indent, flags, true); pp_newline (buffer); dump_histograms_for_stmt (cfun, buffer->buffer->stream, stmt); } dump_implicit_edges (buffer, bb, indent, flags); if (flags & TDF_BLOCKS) dump_bb_end (buffer, bb, indent, flags); }
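/* A minimal usage sketch (an illustration, not part of the original file); stmt, expr and bb are assumed names for a live statement tree, expression tree and basic block:

       print_generic_stmt (stderr, stmt, TDF_VOPS | TDF_MEMSYMS);
       print_generic_expr (stderr, expr, 0);
       dump_generic_bb (stderr, bb, 0, TDF_BLOCKS);

   All three entry points share the static pretty-printer BUFFER, lazily constructed by maybe_init_pretty_print.  */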
#include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "diagnostic.h" #include "real.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "fixed-value.h" #include "value-prof.h" /* Local functions, macros and variables. */ static int op_prio (const_tree); static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void print_call_name (pretty_printer *, const_tree); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_declaration (pretty_printer *, tree, int, int); static void print_struct_decl (pretty_printer *, const_tree, int, int); static void do_niy (pretty_printer *, const_tree); static void dump_vops (pretty_printer *, tree, int, int); static void dump_generic_bb_buff (pretty_printer *, basic_block, int, int); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) #define PRINT_FUNCTION_NAME(NODE) pp_printf \ (buffer, "%s", TREE_CODE (NODE) == NOP_EXPR ? \ lang_hooks.decl_printable_name (TREE_OPERAND (NODE, 0), 1) : \ lang_hooks.decl_printable_name (NODE, 1)) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. */ static void do_niy (pretty_printer *buffer, const_tree node) { int i, len; pp_string (buffer, "<<< Unknown tree: "); pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]); if (EXPR_P (node)) { len = TREE_OPERAND_LENGTH (node); for (i = 0; i < len; ++i) { newline_and_indent (buffer, 2); dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false); } } pp_string (buffer, " >>>\n"); } /* Debugging function to print out a generic expression. */ void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a generic statement. */ void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a chain of trees . */ void debug_tree_chain (tree t) { while (t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf(stderr, " "); t = TREE_CHAIN (t); } fprintf (stderr, "\n"); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. The output is indented by INDENT spaces. */ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. 
*/ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { tree t = node; if (DECL_NAME (t)) pp_tree_identifier (buffer, DECL_NAME (t)); if ((flags & TDF_UID) || DECL_NAME (t) == NULL_TREE) { if (TREE_CODE (t) == LABEL_DECL && LABEL_DECL_UID (t) != -1) pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (t)); else { char c = TREE_CODE (t) == CONST_DECL ? 'C' : 'D'; pp_printf (buffer, "%c.%u", c, DECL_UID (t)); } } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name (pretty_printer *buffer, tree node) { if (DECL_NAME (node)) PRINT_FUNCTION_NAME (node); else dump_decl_name (buffer, node, 0); } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_function_declaration (pretty_printer *buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space (buffer); pp_character (buffer, '('); /* Print the argument types. The last element in the list is a VOID_TYPE. The following avoids printing the last element. */ arg = TYPE_ARG_TYPES (node); while (arg && TREE_CHAIN (arg) && arg != error_mark_node) { wrote_arg = true; dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false); arg = TREE_CHAIN (arg); if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } if (!wrote_arg) pp_string (buffer, "void"); pp_character (buffer, ')'); } /* Dump the domain associated with an array. */ static void dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) { pp_character (buffer, '['); if (domain) { tree min = TYPE_MIN_VALUE (domain); tree max = TYPE_MAX_VALUE (domain); if (min && max && integer_zerop (min) && host_integerp (max, 0)) pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); else { if (min) dump_generic_node (buffer, min, spc, flags, false); pp_character (buffer, ':'); if (max) dump_generic_node (buffer, max, spc, flags, false); } } else pp_string (buffer, "<unknown>"); pp_character (buffer, ']'); } /* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in dump_generic_node. 
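A remapping clause prints as its keyword followed by the remapped decl in parentheses, e.g. private(x); a reduction clause also prints its operator, e.g. reduction(+:sum).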
*/ static void dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags) { const char *name; switch (OMP_CLAUSE_CODE (clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; print_remap: pp_string (buffer, name); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_REDUCTION: pp_string (buffer, "reduction("); pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause))); pp_character (buffer, ':'); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_IF: pp_string (buffer, "if("); dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NUM_THREADS: pp_string (buffer, "num_threads("); dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NOWAIT: pp_string (buffer, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string (buffer, "ordered"); break; case OMP_CLAUSE_DEFAULT: pp_string (buffer, "default("); switch (OMP_CLAUSE_DEFAULT_KIND (clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string (buffer, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string (buffer, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string (buffer, "private"); break; default: gcc_unreachable (); } pp_character (buffer, ')'); break; case OMP_CLAUSE_SCHEDULE: pp_string (buffer, "schedule("); switch (OMP_CLAUSE_SCHEDULE_KIND (clause)) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string (buffer, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string (buffer, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string (buffer, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string (buffer, "runtime"); break; default: gcc_unreachable (); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause)) { pp_character (buffer, ','); dump_generic_node (buffer, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_character (buffer, ')'); break; default: /* Should never happen. */ dump_generic_node (buffer, clause, spc, flags, false); break; } } /* Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags) { if (clause == NULL) return; pp_space (buffer); while (1) { dump_omp_clause (buffer, clause, spc, flags); clause = OMP_CLAUSE_CHAIN (clause); if (clause == NULL) return; pp_space (buffer); } } /* Dump the set of decls SYMS. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_symbols (pretty_printer *buffer, bitmap syms, int flags) { unsigned i; bitmap_iterator bi; if (syms == NULL) pp_string (buffer, "NIL"); else { pp_string (buffer, " { "); EXECUTE_IF_SET_IN_BITMAP (syms, 0, i, bi) { tree sym = referenced_var_lookup (i); dump_generic_node (buffer, sym, 0, flags, false); pp_string (buffer, " "); } pp_string (buffer, "}"); } } /* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent. 
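The column SPC is returned unchanged, so recursive callers can preserve their indentation.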
FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node) || GIMPLE_STMT_P (node); /* We use has_stmt_ann because CALL_EXPR can be both an expression and a statement, and we have no guarantee that it will have a stmt_ann when it is used as an RHS expression. stmt_ann will assert if you call it on something with a non-stmt annotation attached. */ if (TREE_CODE (node) != ERROR_MARK && is_gimple_stmt (node) && (flags & (TDF_VOPS|TDF_MEMSYMS)) && has_stmt_ann (node) && TREE_CODE (node) != PHI_NODE) dump_vops (buffer, node, spc, flags); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) { expanded_location xloc = expand_location (EXPR_LOCATION (node)); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_BINFO: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); } dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class class; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); class = TREE_CODE_CLASS (TREE_CODE (node)); if (class == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (class == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { pp_string (buffer, (TYPE_UNSIGNED (node) ? 
"<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case METHOD_TYPE: dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); pp_string (buffer, "::"); break; case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (buffer, "MEM["); tmp = TMR_SYMBOL (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_BASE (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); if (flags & TDF_DETAILS) { pp_string (buffer, "{"); dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags, false); pp_string (buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else print_struct_decl (buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. 
The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); high = ~high + !low; low = -low; } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf (pp_buffer (buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, high, low); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (buffer, string); break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, "<L%d>", (int) LABEL_DECL_UID (node)); else pp_printf (buffer, "<D.%u>", DECL_UID (node)); break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. 
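This is only a heuristic: the presence of TYPE_METHODS on the RECORD_TYPE or UNION_TYPE is taken as evidence that the type came from C++.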
*/ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case SYMBOL_MEMORY_TAG: case NAME_MEMORY_TAG: case STRUCT_FIELD_TAG: case VAR_DECL: case PARM_DECL: case FIELD_DECL: case NAMESPACE_DECL: case MEMORY_PARTITION_TAG: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (op0) != VALUE_HANDLE) { op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character (buffer, '{'); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_string (buffer, "="); } if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND 
(node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: dump_generic_node (buffer, GENERIC_TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (TREE_CODE (node) == GIMPLE_MODIFY_STMT && MOVE_NONTEMPORAL (node)) pp_string (buffer, "{nt}"); if (TREE_CODE (node) == GIMPLE_MODIFY_STMT) { stmt_ann_t ann; if ((ann = stmt_ann (node)) && ann->has_volatile_ops) pp_string (buffer, "{v}"); } pp_space (buffer); dump_generic_node (buffer, GENERIC_TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. 
*/ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, node); /* Print parameters. */ pp_space (buffer); pp_character (buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (buffer, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_character (buffer, ','); pp_space (buffer); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } pp_character (buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case STATIC_CHAIN_EXPR: pp_string (buffer, "<<static chain of "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. 
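An operand is parenthesized whenever op_prio (op) <= op_prio (node), so equal-priority operands such as a nested MINUS_EXPR print as (a - b) - c, conservatively keeping the tree's grouping explicit.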
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
*/ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF) { pp_string (buffer, "{misalignment: "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case NOP_EXPR: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case 
TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CHANGE_DYNAMIC_TYPE_EXPR: pp_string (buffer, "<<<change_dynamic_type ("); dump_generic_node (buffer, CHANGE_DYNAMIC_TYPE_NEW_TYPE (node), spc + 2, flags, false); pp_string (buffer, ") "); dump_generic_node (buffer, CHANGE_DYNAMIC_TYPE_LOCATION (node), spc + 2, flags, false); pp_string (buffer, ")>>>"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case EXC_PTR_EXPR: pp_string (buffer, "<<<exception object>>>"); break; case FILTER_EXPR: pp_string (buffer, "<<<filter object>>>"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR || TREE_CODE (op0) == GIMPLE_MODIFY_STMT) dump_generic_node (buffer, GENERIC_TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case RESX_EXPR: pp_string (buffer, "resx "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default "); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case PHI_NODE: { int i; dump_generic_node (buffer, PHI_RESULT (node), spc, flags, false); pp_string (buffer, " = PHI <"); for (i = 0; i < PHI_NUM_ARGS (node); i++) { dump_generic_node (buffer, PHI_ARG_DEF (node, i), spc, flags, false); pp_string (buffer, "("); pp_decimal_int (buffer, PHI_ARG_EDGE (node, i)->src->index); pp_string (buffer, ")"); if (i < PHI_NUM_ARGS (node) - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if (stmt_references_memory_p (node) && (flags & TDF_MEMSYMS)) dump_symbols (buffer, STORED_SYMS (node), flags); } break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case VALUE_HANDLE: pp_printf (buffer, "VH.%d", VALUE_HANDLE_ID (node)); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); 
dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); if (OMP_PARALLEL_FN (node)) { pp_string (buffer, " [child fn: "); dump_generic_node (buffer, OMP_PARALLEL_FN (node), spc, flags, false); pp_string (buffer, " ("); if (OMP_PARALLEL_DATA_ARG (node)) dump_generic_node (buffer, OMP_PARALLEL_DATA_ARG (node), spc, flags, false); else pp_string (buffer, "???"); pp_string (buffer, ")]"); } dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, OMP_FOR_INIT (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_COND (node), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, OMP_FOR_INCR (node), spc, flags, false); pp_string (buffer, ")"); if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); if (OMP_SECTIONS_CONTROL (node)) { pp_string (buffer, " <"); dump_generic_node (buffer, OMP_SECTIONS_CONTROL (node), spc, flags, false); pp_string (buffer, ">"); } dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTIONS_SWITCH: pp_string (buffer, "OMP_SECTIONS_SWITCH"); is_expr = false; break; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character (buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_ATOMIC_LOAD: pp_string (buffer, "#pragma omp atomic_load"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); pp_character (buffer, '*'); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_ATOMIC_STORE: pp_string (buffer, "#pragma omp atomic_store ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case OMP_SINGLE:
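/* Like the other OpenMP constructs above, this prints the pragma line, then its clauses, and shares the brace-wrapped body printing via dump_omp_body.  */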
pp_string (buffer, " dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_RETURN: pp_string (buffer, "OMP_RETURN"); if (OMP_RETURN_NOWAIT (node)) pp_string (buffer, " [nowait]"); is_expr = false; break; case OMP_CONTINUE: pp_string (buffer, "OMP_CONTINUE <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " <- "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); is_expr = false; break; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string (buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: { tree t; pp_string (buffer, "BLOCK"); if (BLOCK_ABSTRACT (node)) pp_string (buffer, " [abstract]"); if (TREE_ASM_WRITTEN (node)) pp_string (buffer, " [written]"); newline_and_indent (buffer, spc + 2); if (BLOCK_SUPERCONTEXT (node)) { pp_string (buffer, "SUPERCONTEXT: "); if (TREE_CODE (BLOCK_SUPERCONTEXT 
(node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_SUPERCONTEXT (node)); else dump_generic_node (buffer, BLOCK_SUPERCONTEXT (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_SUBBLOCKS (node)) { pp_string (buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (node); t; t = BLOCK_CHAIN (t)) pp_printf (buffer, "%p ", (void *)t); newline_and_indent (buffer, spc + 2); } if (BLOCK_VARS (node)) { pp_string (buffer, "VARS: "); for (t = BLOCK_VARS (node); t; t = TREE_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (node)) { pp_string (buffer, "ABSTRACT_ORIGIN: "); if (TREE_CODE (BLOCK_ABSTRACT_ORIGIN (node)) == BLOCK) pp_printf (buffer, "BLOCK %p", (void *)BLOCK_ABSTRACT_ORIGIN (node)); else dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (node), 0, flags, false); newline_and_indent (buffer, spc + 2); } } break; case VEC_EXTRACT_EVEN_EXPR: pp_string (buffer, " VEC_EXTRACT_EVEN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_EXTRACT_ODD_EXPR: pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_HIGH_EXPR: pp_string (buffer, " VEC_INTERLEAVE_HIGH_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_LOW_EXPR: pp_string (buffer, " VEC_INTERLEAVE_LOW_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (buffer); /* If we're building a diagnostic, the formatted text will be written into BUFFER's stream by the caller; otherwise, write it now. */ if (!(flags & TDF_DIAGNOSTIC)) pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ static void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. 
*/ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. */ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) { pp_string (buffer, " __asm__ "); pp_character (buffer, '('); dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false); pp_character (buffer, ')'); } /* The initial value of a function serves to determine whether the function is declared or defined. So the following does not apply to function nodes. */ if (TREE_CODE (t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL (t)) { pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false); } } if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t)) { pp_string (buffer, " [value-expr: "); dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false); pp_character (buffer, ']'); } pp_character (buffer, ';'); } /* Prints a structure: name, fields, and methods. FIXME: Still incomplete. */ static void print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME (node)) { INDENT (spc); if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if ((TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE)) pp_string (buffer, "union "); dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline (buffer); INDENT (spc); pp_character (buffer, '{'); pp_newline (buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS (node); while (tmp) { /* Avoid printing the structure recursively. */ /* FIXME: Not implemented correctly... what about the case when we have a cycle in the containment graph? ... Maybe this could be solved by looking at the scope in which the structure was declared. */ if (TREE_TYPE (tmp) != node || (TREE_CODE (TREE_TYPE (tmp)) == POINTER_TYPE && TREE_TYPE (TREE_TYPE (tmp)) != node)) { print_declaration (buffer, tmp, spc+2, flags); pp_newline (buffer); } tmp = TREE_CHAIN (tmp); } } INDENT (spc); pp_character (buffer, '}'); } /* Return the priority of the operator OP. From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . unary +, - and * have higher precedence than the corresponding binary operators.
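Worked example (illustrative): in a MULT_EXPR whose first operand is a PLUS_EXPR, op_prio returns 13 for the product and 12 for the sum, so dump_generic_node wraps the lower-priority operand and prints (a + b) * c; with the nesting reversed, a PLUS_EXPR over a MULT_EXPR prints a + b * c with no parentheses.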
*/ static int op_prio (const_tree op) { if (op == NULL) return 9999; switch (TREE_CODE (op)) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: return 12; case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case VEC_UNPACK_HI_EXPR: case VEC_UNPACK_LO_EXPR: case VEC_UNPACK_FLOAT_HI_EXPR: case VEC_UNPACK_FLOAT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: return 16; case SAVE_EXPR: case NON_LVALUE_EXPR: return op_prio (TREE_OPERAND (op, 0)); default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the symbol associated with operator CODE. 
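For instance (illustrative): op_symbol_code (TRUNC_MOD_EXPR) returns "%", the rounded division and modulo flavors carry tags as in "/[cl]" and "%[fl]", and unordered comparisons gain a 'u' prefix, e.g. "u<=" for UNLE_EXPR.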
*/ const char * op_symbol_code (enum tree_code code) { switch (code) { case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case POINTER_PLUS_EXPR: return "+"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case ALIGN_INDIRECT_REF: return "A*"; case MISALIGNED_INDIRECT_REF: return "M*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol (const_tree op) { return op_symbol_code (TREE_CODE (op)); } /* Prints the name of a CALL_EXPR. */ static void print_call_name (pretty_printer *buffer, const_tree node) { tree op0; gcc_assert (TREE_CODE (node) == CALL_EXPR); op0 = CALL_EXPR_FN (node); if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: dump_function_name (buffer, op0); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); break; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); pp_string (buffer, ") ? "); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, 0, false); pp_string (buffer, " : "); dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, 0, false); break; case COMPONENT_REF: /* The function is a pointer contained in a structure. */ if (TREE_CODE (TREE_OPERAND (op0, 0)) == INDIRECT_REF || TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 1)); else dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, 0, false); /* else We can have several levels of structures and a function pointer inside. This is not implemented yet... 
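Illustrative case: a call such as s.ops->fn (x), where a struct member holds a pointer to a struct of function pointers, would require walking more than one COMPONENT_REF level.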
*/ /* NIY;*/ break; case ARRAY_REF: if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 0)); else dump_generic_node (buffer, op0, 0, 0, false); break; case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node (buffer, op0, 0, 0, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... */ static void pretty_print_string (pretty_printer *buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string (buffer, "\\b"); break; case '\f': pp_string (buffer, "\\f"); break; case '\n': pp_string (buffer, "\\n"); break; case '\r': pp_string (buffer, "\\r"); break; case '\t': pp_string (buffer, "\\t"); break; case '\v': pp_string (buffer, "\\v"); break; case '\\': pp_string (buffer, "\\\\"); break; case '\"': pp_string (buffer, "\\\""); break; case '\'': pp_string (buffer, "\\'"); break; /* No need to handle \0; the loop terminates on \0. */ case '\1': pp_string (buffer, "\\1"); break; case '\2': pp_string (buffer, "\\2"); break; case '\3': pp_string (buffer, "\\3"); break; case '\4': pp_string (buffer, "\\4"); break; case '\5': pp_string (buffer, "\\5"); break; case '\6': pp_string (buffer, "\\6"); break; case '\7': pp_string (buffer, "\\7"); break; default: pp_character (buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, /* prefix */NULL, /* line-width */0); pp_needs_newline (&buffer) = true; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } static void dump_vops (pretty_printer *buffer, tree stmt, int spc, int flags) { struct voptype_d *vdefs; struct voptype_d *vuses; int i, n; if (!ssa_operands_active () || !stmt_references_memory_p (stmt)) return; /* Even if the statement doesn't have virtual operators yet, it may contain symbol information (this happens before aliases have been computed). */ if ((flags & TDF_MEMSYMS) && VUSE_OPS (stmt) == NULL && VDEF_OPS (stmt) == NULL) { if (LOADED_SYMS (stmt)) { pp_string (buffer, "# LOADS: "); dump_symbols (buffer, LOADED_SYMS (stmt), flags); newline_and_indent (buffer, spc); } if (STORED_SYMS (stmt)) { pp_string (buffer, "# STORES: "); dump_symbols (buffer, STORED_SYMS (stmt), flags); newline_and_indent (buffer, spc); } return; } vuses = VUSE_OPS (stmt); while (vuses) { pp_string (buffer, "# VUSE <"); n = VUSE_NUM (vuses); for (i = 0; i < n; i++) { dump_generic_node (buffer, VUSE_OP (vuses, i), spc + 2, flags, false); if (i < n - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if (flags & TDF_MEMSYMS) dump_symbols (buffer, LOADED_SYMS (stmt), flags); newline_and_indent (buffer, spc); vuses = vuses->next; } vdefs = VDEF_OPS (stmt); while (vdefs) { pp_string (buffer, "# "); dump_generic_node (buffer, VDEF_RESULT (vdefs), spc + 2, flags, false); pp_string (buffer, " = VDEF <"); n = VDEF_NUM (vdefs); for (i = 0; i < n; i++) { dump_generic_node (buffer, VDEF_OP (vdefs, i), spc + 2, flags, 0); if (i < n - 1) pp_string (buffer, ", "); } pp_string (buffer, ">"); if ((flags & TDF_MEMSYMS) && vdefs->next == NULL) dump_symbols (buffer, STORED_SYMS (stmt), flags); newline_and_indent (buffer, spc); vdefs = vdefs->next; } } /* Dumps basic block BB to FILE with details described by FLAGS and indented by INDENT spaces. 
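A typical debugging call (illustrative) is dump_generic_bb (stderr, bb, 2, TDF_VOPS | TDF_BLOCKS), which prints the block header, PHI nodes, statements and implicit edges indented by two spaces.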
*/ void dump_generic_bb (FILE *file, basic_block bb, int indent, int flags) { maybe_init_pretty_print (file); dump_generic_bb_buff (&buffer, bb, indent, flags); pp_flush (&buffer); } /* Dumps header of basic block BB to buffer BUFFER indented by INDENT spaces and details described by flags. */ static void dump_bb_header (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; edge_iterator ei; if (flags & TDF_BLOCKS) { INDENT (indent); pp_string (buffer, "# BLOCK "); pp_decimal_int (buffer, bb->index); if (bb->frequency) { pp_string (buffer, " freq:"); pp_decimal_int (buffer, bb->frequency); } if (bb->count) { pp_string (buffer, " count:"); pp_widest_integer (buffer, bb->count); } if (flags & TDF_LINENO) { block_stmt_iterator bsi; for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) if (get_lineno (bsi_stmt (bsi)) != -1) { pp_string (buffer, ", starting at line "); pp_decimal_int (buffer, get_lineno (bsi_stmt (bsi))); break; } } newline_and_indent (buffer, indent); pp_string (buffer, "# PRED:"); pp_write_text_to_stream (buffer); FOR_EACH_EDGE (e, ei, bb->preds) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->src == ENTRY_BLOCK_PTR) pp_string (buffer, "ENTRY"); else pp_decimal_int (buffer, e->src->index); } else dump_edge_info (buffer->buffer->stream, e, 0); pp_newline (buffer); } else { stmt = first_stmt (bb); if (!stmt || TREE_CODE (stmt) != LABEL_EXPR) { INDENT (indent - 2); pp_string (buffer, "<bb "); pp_decimal_int (buffer, bb->index); pp_string (buffer, ">:"); pp_newline (buffer); } } pp_write_text_to_stream (buffer); check_bb_profile (bb, buffer->buffer->stream); } /* Dumps end of basic block BB to buffer BUFFER indented by INDENT spaces. */ static void dump_bb_end (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; INDENT (indent); pp_string (buffer, "# SUCC:"); pp_write_text_to_stream (buffer); FOR_EACH_EDGE (e, ei, bb->succs) if (flags & TDF_SLIM) { pp_string (buffer, " "); if (e->dest == EXIT_BLOCK_PTR) pp_string (buffer, "EXIT"); else pp_decimal_int (buffer, e->dest->index); } else dump_edge_info (buffer->buffer->stream, e, 1); pp_newline (buffer); } /* Dump PHI nodes of basic block BB to BUFFER with details described by FLAGS and indented by INDENT spaces. */ static void dump_phi_nodes (pretty_printer *buffer, basic_block bb, int indent, int flags) { tree phi = phi_nodes (bb); if (!phi) return; for (; phi; phi = PHI_CHAIN (phi)) { if (is_gimple_reg (PHI_RESULT (phi)) || (flags & TDF_VOPS)) { INDENT (indent); pp_string (buffer, "# "); dump_generic_node (buffer, phi, indent, flags, false); pp_newline (buffer); } } } /* Dump jump to basic block BB that is represented implicitly in the cfg to BUFFER. */ static void pp_cfg_jump (pretty_printer *buffer, basic_block bb) { tree stmt; stmt = first_stmt (bb); pp_string (buffer, "goto <bb "); pp_decimal_int (buffer, bb->index); pp_string (buffer, ">"); if (stmt && TREE_CODE (stmt) == LABEL_EXPR) { pp_string (buffer, " ("); dump_generic_node (buffer, LABEL_EXPR_LABEL (stmt), 0, 0, false); pp_string (buffer, ")"); } pp_semicolon (buffer); } /* Dump edges represented implicitly in basic block BB to BUFFER, indented by INDENT spaces, with details given by FLAGS. 
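For example (illustrative): a block ending in a COND_EXPR gets its two arms printed as goto <bb N>; separated by an else line, while a fallthru edge to a block other than bb->next_bb is rendered as an explicit goto.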
*/ static void dump_implicit_edges (pretty_printer *buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; tree stmt; stmt = last_stmt (bb); if (stmt && TREE_CODE (stmt) == COND_EXPR) { edge true_edge, false_edge; /* When we are emitting the code or changing CFG, it is possible that the edges are not yet created. When we are using debug_bb in such a situation, we do not want it to crash. */ if (EDGE_COUNT (bb->succs) != 2) return; extract_true_false_edges_from_block (bb, &true_edge, &false_edge); INDENT (indent + 2); pp_cfg_jump (buffer, true_edge->dest); newline_and_indent (buffer, indent); pp_string (buffer, "else"); newline_and_indent (buffer, indent + 2); pp_cfg_jump (buffer, false_edge->dest); pp_newline (buffer); return; } /* If there is a fallthru edge, we may need to add an artificial goto to the dump. */ FOR_EACH_EDGE (e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; if (e && e->dest != bb->next_bb) { INDENT (indent); if ((flags & TDF_LINENO) #ifdef USE_MAPPED_LOCATION && e->goto_locus != UNKNOWN_LOCATION #else && e->goto_locus #endif ) { expanded_location goto_xloc; #ifdef USE_MAPPED_LOCATION goto_xloc = expand_location (e->goto_locus); #else goto_xloc = *e->goto_locus; #endif pp_character (buffer, '['); if (goto_xloc.file) { pp_string (buffer, goto_xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, goto_xloc.line); pp_string (buffer, "] "); } pp_cfg_jump (buffer, e->dest); pp_newline (buffer); } } /* Dumps basic block BB to buffer BUFFER with details described by FLAGS and indented by INDENT spaces. */ static void dump_generic_bb_buff (pretty_printer *buffer, basic_block bb, int indent, int flags) { block_stmt_iterator bsi; tree stmt; int label_indent = indent - 2; if (label_indent < 0) label_indent = 0; dump_bb_header (buffer, bb, indent, flags); dump_phi_nodes (buffer, bb, indent, flags); for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi)) { int curr_indent; stmt = bsi_stmt (bsi); curr_indent = TREE_CODE (stmt) == LABEL_EXPR ? label_indent : indent; INDENT (curr_indent); dump_generic_node (buffer, stmt, curr_indent, flags, true); pp_newline (buffer); dump_histograms_for_stmt (cfun, buffer->buffer->stream, stmt); } dump_implicit_edges (buffer, bb, indent, flags); if (flags & TDF_BLOCKS) dump_bb_end (buffer, bb, indent, flags); }
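/* Usage sketch (illustrative addition, not part of the original file).
   Assuming the declarations above are in scope -- print_generic_stmt,
   dump_generic_bb and the TDF_* flags from tree-pass.h -- a quick
   debugging helper could look like this:  */
static void
debug_stmt_and_bb (tree stmt, basic_block bb)
{
  /* Print one statement with virtual operands and source lines.  */
  print_generic_stmt (stderr, stmt, TDF_VOPS | TDF_LINENO);
  /* Then dump the whole containing basic block, indented two spaces.  */
  dump_generic_bb (stderr, bb, 2, TDF_BLOCKS);
}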
#include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "diagnostic.h" #include "real.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "fixed-value.h" #include "value-prof.h" /* Local functions, macros and variables. */ static int op_prio(const_tree); static const char *op_symbol(const_tree); static void pretty_print_string(pretty_printer *, const char *); static void print_call_name(pretty_printer *, const_tree); static void newline_and_indent(pretty_printer *, int); static void maybe_init_pretty_print(FILE *); static void print_declaration(pretty_printer *, tree, int, int); static void print_struct_decl(pretty_printer *, const_tree, int, int); static void do_niy(pretty_printer *, const_tree); static void dump_vops(pretty_printer *, tree, int, int); static void dump_generic_bb_buff(pretty_printer *, basic_block, int, int); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) #define PRINT_FUNCTION_NAME(NODE) pp_printf \ (buffer, "%s", TREE_CODE (NODE) == NOP_EXPR ? \ lang_hooks.decl_printable_name (TREE_OPERAND (NODE, 0), 1) : \ lang_hooks.decl_printable_name (NODE, 1)) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. */ static void do_niy(pretty_printer * buffer, const_tree node) { int i, len; pp_string(buffer, "<<< Unknown tree: "); pp_string(buffer, tree_code_name[(int)TREE_CODE(node)]); if (EXPR_P(node)) { len = TREE_OPERAND_LENGTH(node); for (i = 0; i < len; ++i) { newline_and_indent(buffer, 2); dump_generic_node(buffer, TREE_OPERAND(node, i), 2, 0, false); } } pp_string(buffer, " >>>\n"); } /* Debugging function to print out a generic expression. */ void debug_generic_expr(tree t) { print_generic_expr(stderr, t, TDF_VOPS | TDF_MEMSYMS); fprintf(stderr, "\n"); } /* Debugging function to print out a generic statement. */ void debug_generic_stmt(tree t) { print_generic_stmt(stderr, t, TDF_VOPS | TDF_MEMSYMS); fprintf(stderr, "\n"); } /* Debugging function to print out a chain of trees . */ void debug_tree_chain(tree t) { while (t) { print_generic_expr(stderr, t, TDF_VOPS | TDF_MEMSYMS | TDF_UID); fprintf(stderr, " "); t = TREE_CHAIN(t); } fprintf(stderr, "\n"); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl(FILE * file, tree decl, int flags) { maybe_init_pretty_print(file); print_declaration(&buffer, decl, 2, flags); pp_write_text_to_stream(&buffer); } /* * Print tree T, and its successors, on file FILE. FLAGS specifies details * to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_stmt(FILE * file, tree t, int flags) { maybe_init_pretty_print(file); dump_generic_node(&buffer, t, 0, flags, true); pp_flush(&buffer); } /* * Print tree T, and its successors, on file FILE. FLAGS specifies details * to show in the dump. See TDF_* in tree-pass.h. The output is indented by * INDENT spaces. */ void print_generic_stmt_indented(FILE * file, tree t, int flags, int indent) { int i; maybe_init_pretty_print(file); for (i = 0; i < indent; i++) pp_space(&buffer); dump_generic_node(&buffer, t, indent, flags, true); pp_flush(&buffer); } /* * Print a single expression T on file FILE. FLAGS specifies details to show * in the dump. See TDF_* in tree-pass.h. 
*/ void print_generic_expr(FILE * file, tree t, int flags) { maybe_init_pretty_print(file); dump_generic_node(&buffer, t, 0, flags, false); } /* * Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name(pretty_printer * buffer, tree node, int flags) { tree t = node; if (DECL_NAME(t)) pp_tree_identifier(buffer, DECL_NAME(t)); if ((flags & TDF_UID) || DECL_NAME(t) == NULL_TREE) { if (TREE_CODE(t) == LABEL_DECL && LABEL_DECL_UID(t) != -1) pp_printf(buffer, "L.%d", (int)LABEL_DECL_UID(t)); else { char c = TREE_CODE(t) == CONST_DECL ? 'C' : 'D'; pp_printf(buffer, "%c.%u", c, DECL_UID(t)); } } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name(pretty_printer * buffer, tree node) { if (DECL_NAME(node)) PRINT_FUNCTION_NAME(node); else dump_decl_name(buffer, node, 0); } /* * Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and * FLAGS are as in dump_generic_node. */ static void dump_function_declaration(pretty_printer * buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space(buffer); pp_character(buffer, '('); /* * Print the argument types. The last element in the list is a * VOID_TYPE. The following avoids printing the last element. */ arg = TYPE_ARG_TYPES(node); while (arg && TREE_CHAIN(arg) && arg != error_mark_node) { wrote_arg = true; dump_generic_node(buffer, TREE_VALUE(arg), spc, flags, false); arg = TREE_CHAIN(arg); if (TREE_CHAIN(arg) && TREE_CODE(TREE_CHAIN(arg)) == TREE_LIST) { pp_character(buffer, ','); pp_space(buffer); } } if (!wrote_arg) pp_string(buffer, "void"); pp_character(buffer, ')'); } /* Dump the domain associated with an array. */ static void dump_array_domain(pretty_printer * buffer, tree domain, int spc, int flags) { pp_character(buffer, '['); if (domain) { tree min = TYPE_MIN_VALUE(domain); tree max = TYPE_MAX_VALUE(domain); if (min && max && integer_zerop(min) && host_integerp(max, 0)) pp_wide_integer(buffer, TREE_INT_CST_LOW(max) + 1); else { if (min) dump_generic_node(buffer, min, spc, flags, false); pp_character(buffer, ':'); if (max) dump_generic_node(buffer, max, spc, flags, false); } } else pp_string(buffer, "<unknown>"); pp_character(buffer, ']'); } /* * Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in * dump_generic_node. 
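* Illustrative output: a clause chain may print as private(i) firstprivate(n) reduction(+:sum) schedule(dynamic,4), produced by the print_remap, OMP_CLAUSE_REDUCTION and OMP_CLAUSE_SCHEDULE arms below.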
*/ static void dump_omp_clause(pretty_printer * buffer, tree clause, int spc, int flags) { const char *name; switch (OMP_CLAUSE_CODE(clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; print_remap: pp_string(buffer, name); pp_character(buffer, '('); dump_generic_node(buffer, OMP_CLAUSE_DECL(clause), spc, flags, false); pp_character(buffer, ')'); break; case OMP_CLAUSE_REDUCTION: pp_string(buffer, "reduction("); pp_string(buffer, op_symbol_code(OMP_CLAUSE_REDUCTION_CODE(clause))); pp_character(buffer, ':'); dump_generic_node(buffer, OMP_CLAUSE_DECL(clause), spc, flags, false); pp_character(buffer, ')'); break; case OMP_CLAUSE_IF: pp_string(buffer, "if("); dump_generic_node(buffer, OMP_CLAUSE_IF_EXPR(clause), spc, flags, false); pp_character(buffer, ')'); break; case OMP_CLAUSE_NUM_THREADS: pp_string(buffer, "num_threads("); dump_generic_node(buffer, OMP_CLAUSE_NUM_THREADS_EXPR(clause), spc, flags, false); pp_character(buffer, ')'); break; case OMP_CLAUSE_NOWAIT: pp_string(buffer, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string(buffer, "ordered"); break; case OMP_CLAUSE_DEFAULT: pp_string(buffer, "default("); switch (OMP_CLAUSE_DEFAULT_KIND(clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string(buffer, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string(buffer, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string(buffer, "private"); break; default: gcc_unreachable(); } pp_character(buffer, ')'); break; case OMP_CLAUSE_SCHEDULE: pp_string(buffer, "schedule("); switch (OMP_CLAUSE_SCHEDULE_KIND(clause)) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string(buffer, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string(buffer, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string(buffer, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string(buffer, "runtime"); break; default: gcc_unreachable(); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR(clause)) { pp_character(buffer, ','); dump_generic_node(buffer, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR(clause), spc, flags, false); } pp_character(buffer, ')'); break; default: /* Should never happen. */ dump_generic_node(buffer, clause, spc, flags, false); break; } } /* * Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in * dump_generic_node. */ static void dump_omp_clauses(pretty_printer * buffer, tree clause, int spc, int flags) { if (clause == NULL) return; pp_space(buffer); while (1) { dump_omp_clause(buffer, clause, spc, flags); clause = OMP_CLAUSE_CHAIN(clause); if (clause == NULL) return; pp_space(buffer); } } /* * Dump the set of decls SYMS. BUFFER, SPC and FLAGS are as in * dump_generic_node. */ static void dump_symbols(pretty_printer * buffer, bitmap syms, int flags) { unsigned i; bitmap_iterator bi; if (syms == NULL) pp_string(buffer, "NIL"); else { pp_string(buffer, " { "); EXECUTE_IF_SET_IN_BITMAP(syms, 0, i, bi) { tree sym = referenced_var_lookup(i); dump_generic_node(buffer, sym, 0, flags, false); pp_string(buffer, " "); } pp_string(buffer, "}"); } } /* * Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent. * FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). 
If * IS_STMT is true, the object printed is considered to be a statement and it * is terminated by ';' if appropriate. */ int dump_generic_node(pretty_printer * buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P(node) || GIMPLE_STMT_P(node); /* * We use has_stmt_ann because CALL_EXPR can be both an expression and a * statement, and we have no guarantee that it will have a stmt_ann when * it is used as an RHS expression. stmt_ann will assert if you call it * on something with a non-stmt annotation attached. */ if (TREE_CODE(node) != ERROR_MARK && is_gimple_stmt(node) && (flags & (TDF_VOPS | TDF_MEMSYMS)) && has_stmt_ann(node) && TREE_CODE(node) != PHI_NODE) dump_vops(buffer, node, spc, flags); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf(buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION(node)) { expanded_location xloc = expand_location(EXPR_LOCATION(node)); pp_character(buffer, '['); if (xloc.file) { pp_string(buffer, xloc.file); pp_string(buffer, " : "); } pp_decimal_int(buffer, xloc.line); pp_string(buffer, "] "); } switch (TREE_CODE(node)) { case ERROR_MARK: pp_string(buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier(buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE(node)) { dump_generic_node(buffer, TREE_PURPOSE(node), spc, flags, false); pp_space(buffer); } dump_generic_node(buffer, TREE_VALUE(node), spc, flags, false); node = TREE_CHAIN(node); if (node && TREE_CODE(node) == TREE_LIST) { pp_character(buffer, ','); pp_space(buffer); } } break; case TREE_BINFO: dump_generic_node(buffer, BINFO_TYPE(node), spc, flags, false); case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH(node) > 0) { size_t len = TREE_VEC_LENGTH(node); for (i = 0; i < len - 1; i++) { dump_generic_node(buffer, TREE_VEC_ELT(node, i), spc, flags, false); pp_character(buffer, ','); pp_space(buffer); } dump_generic_node(buffer, TREE_VEC_ELT(node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS(node); enum tree_code_class class; if (quals & TYPE_QUAL_CONST) pp_string(buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string(buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string(buffer, "restrict "); class = TREE_CODE_CLASS(TREE_CODE(node)); if (class == tcc_declaration) { if (DECL_NAME(node)) dump_decl_name(buffer, node, flags); else pp_string(buffer, "<unnamed type decl>"); } else if (class == tcc_type) { if (TYPE_NAME(node)) { if (TREE_CODE(TYPE_NAME(node)) == IDENTIFIER_NODE) pp_tree_identifier(buffer, TYPE_NAME(node)); else if (TREE_CODE(TYPE_NAME(node)) == TYPE_DECL && DECL_NAME(TYPE_NAME(node))) dump_decl_name(buffer, TYPE_NAME(node), flags); else pp_string(buffer, "<unnamed type>"); } else if (TREE_CODE(node) == VECTOR_TYPE) { pp_string(buffer, "vector "); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); } else if (TREE_CODE(node) == INTEGER_TYPE) { pp_string(buffer, (TYPE_UNSIGNED(node) ? "<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int(buffer, TYPE_PRECISION(node)); pp_string(buffer, ">"); } else pp_string(buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE(node) == POINTER_TYPE ? 
"*" : "&"); if (TREE_CODE(TREE_TYPE(node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE(node); dump_generic_node(buffer, TREE_TYPE(fnode), spc, flags, false); pp_space(buffer); pp_character(buffer, '('); pp_string(buffer, str); if (TYPE_NAME(node) && DECL_NAME(TYPE_NAME(node))) dump_decl_name(buffer, TYPE_NAME(node), flags); else pp_printf(buffer, "<T%x>", TYPE_UID(node)); pp_character(buffer, ')'); dump_function_declaration(buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS(node); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); pp_space(buffer); pp_string(buffer, str); if (quals & TYPE_QUAL_CONST) pp_string(buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string(buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string(buffer, " restrict"); if (TYPE_REF_CAN_ALIAS_ALL(node)) pp_string(buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case METHOD_TYPE: dump_decl_name(buffer, TYPE_NAME(TYPE_METHOD_BASETYPE(node)), flags); pp_string(buffer, "::"); break; case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string(buffer, "MEM["); tmp = TMR_SYMBOL(node); if (tmp) { pp_string(buffer, sep); sep = ", "; pp_string(buffer, "symbol: "); dump_generic_node(buffer, tmp, spc, flags, false); } tmp = TMR_BASE(node); if (tmp) { pp_string(buffer, sep); sep = ", "; pp_string(buffer, "base: "); dump_generic_node(buffer, tmp, spc, flags, false); } tmp = TMR_INDEX(node); if (tmp) { pp_string(buffer, sep); sep = ", "; pp_string(buffer, "index: "); dump_generic_node(buffer, tmp, spc, flags, false); } tmp = TMR_STEP(node); if (tmp) { pp_string(buffer, sep); sep = ", "; pp_string(buffer, "step: "); dump_generic_node(buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET(node); if (tmp) { pp_string(buffer, sep); sep = ", "; pp_string(buffer, "offset: "); dump_generic_node(buffer, tmp, spc, flags, false); } pp_string(buffer, "]"); if (flags & TDF_DETAILS) { pp_string(buffer, "{"); dump_generic_node(buffer, TMR_ORIGINAL(node), spc, flags, false); pp_string(buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE(node); TREE_CODE(tmp) == ARRAY_TYPE; tmp = TREE_TYPE(tmp)) ; dump_generic_node(buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE(tmp) == ARRAY_TYPE; tmp = TREE_TYPE(tmp)) dump_array_domain(buffer, TYPE_DOMAIN(tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS(node); if (quals & TYPE_QUAL_CONST) pp_string(buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string(buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE(node) == RECORD_TYPE) pp_string(buffer, "struct "); else if (TREE_CODE(node) == UNION_TYPE) pp_string(buffer, "union "); if (TYPE_NAME(node)) dump_generic_node(buffer, TYPE_NAME(node), spc, flags, false); else print_struct_decl(buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE(TREE_TYPE(node)) == POINTER_TYPE) { /* * In the case of a pointer, one may want to divide by the size * of the pointed-to type. Unfortunately, this not * straightforward. The C front-end maps expressions * * (int *) 5 int *p; (p + 5) * * in such a way that the two INTEGER_CST nodes for "5" have * different values but identical types. In the latter case, the * 5 is multiplied by sizeof (int) in c-common.c * (pointer_int_sum) to convert it to a byte address, and yet the * type of the node is left unchanged. Argh. 
What is consistent * though is that the number value corresponds to bytes (UNITS) * offset. * * NB: Neither of the following divisors can be trivially used to * recover the original literal: * * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) * TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer(buffer, TREE_INT_CST_LOW(node)); pp_string(buffer, "B"); /* pseudo-unit */ } else if (!host_integerp(node, 0)) { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW(val); HOST_WIDE_INT high = TREE_INT_CST_HIGH(val); if (tree_int_cst_sgn(val) < 0) { pp_character(buffer, '-'); high = ~high + !low; low = -low; } /* * Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf(pp_buffer(buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, high, low); pp_string(buffer, pp_buffer(buffer)->digit_buffer); } else pp_wide_integer(buffer, TREE_INT_CST_LOW(node)); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW(node)) pp_string(buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST(node); if (REAL_VALUE_ISINF(d)) pp_string(buffer, REAL_VALUE_NEGATIVE(d) ? " -Inf" : " Inf"); else if (REAL_VALUE_ISNAN(d)) pp_string(buffer, " Nan"); else { char string[100]; real_to_decimal(string, &d, sizeof(string), 0, 1); pp_string(buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *)&TREE_REAL_CST(node); pp_string(buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST(node); i++) output_formatted_integer(buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal(string, TREE_FIXED_CST_PTR(node), sizeof(string)); pp_string(buffer, string); break; } case COMPLEX_CST: pp_string(buffer, "__complex__ ("); dump_generic_node(buffer, TREE_REALPART(node), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_IMAGPART(node), spc, flags, false); pp_string(buffer, ")"); break; case STRING_CST: pp_string(buffer, "\""); pretty_print_string(buffer, TREE_STRING_POINTER(node)); pp_string(buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string(buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS(node); elt; elt = TREE_CHAIN(elt)) { dump_generic_node(buffer, TREE_VALUE(elt), spc, flags, false); if (TREE_CHAIN(elt)) pp_string(buffer, ", "); } pp_string(buffer, " }"); } break; case FUNCTION_TYPE: break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name(buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME(node)) dump_decl_name(buffer, node, flags); else if (LABEL_DECL_UID(node) != -1) pp_printf(buffer, "<L%d>", (int)LABEL_DECL_UID(node)); else pp_printf(buffer, "<D.%u>", DECL_UID(node)); break; case TYPE_DECL: if (DECL_IS_BUILTIN(node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME(node)) dump_decl_name(buffer, node, flags); else { if ((TREE_CODE(TREE_TYPE(node)) == RECORD_TYPE || TREE_CODE(TREE_TYPE(node)) == UNION_TYPE) && TYPE_METHODS(TREE_TYPE(node))) { /* * The type is a c++ class: all structures have at least 4 * methods. */ pp_string(buffer, "class "); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); } else { pp_string(buffer, (TREE_CODE(TREE_TYPE(node)) == UNION_TYPE ? 
"union" : "struct ")); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); } } break; case SYMBOL_MEMORY_TAG: case NAME_MEMORY_TAG: case STRUCT_FIELD_TAG: case VAR_DECL: case PARM_DECL: case FIELD_DECL: case NAMESPACE_DECL: case MEMORY_PARTITION_TAG: dump_decl_name(buffer, node, flags); break; case RESULT_DECL: pp_string(buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND(node, 0); str = "."; if (TREE_CODE(op0) == INDIRECT_REF) { op0 = TREE_OPERAND(op0, 0); str = "->"; } if (op_prio(op0) < op_prio(node)) pp_character(buffer, '('); dump_generic_node(buffer, op0, spc, flags, false); if (op_prio(op0) < op_prio(node)) pp_character(buffer, ')'); pp_string(buffer, str); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); if (TREE_CODE(op0) != VALUE_HANDLE) { op0 = component_ref_field_offset(node); if (op0 && TREE_CODE(op0) != INTEGER_CST) { pp_string(buffer, "{off: "); dump_generic_node(buffer, op0, spc, flags, false); pp_character(buffer, '}'); } } break; case BIT_FIELD_REF: pp_string(buffer, "BIT_FIELD_REF <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 2), spc, flags, false); pp_string(buffer, ">"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND(node, 0); if (op_prio(op0) < op_prio(node)) pp_character(buffer, '('); dump_generic_node(buffer, op0, spc, flags, false); if (op_prio(op0) < op_prio(node)) pp_character(buffer, ')'); pp_character(buffer, '['); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); if (TREE_CODE(node) == ARRAY_RANGE_REF) pp_string(buffer, " ..."); pp_character(buffer, ']'); op0 = array_ref_low_bound(node); op1 = array_ref_element_size(node); if (!integer_zerop(op0) || TREE_OPERAND(node, 2) || TREE_OPERAND(node, 3)) { pp_string(buffer, "{lb: "); dump_generic_node(buffer, op0, spc, flags, false); pp_string(buffer, " sz: "); dump_generic_node(buffer, op1, spc, flags, false); pp_character(buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character(buffer, '{'); if (TREE_CODE(TREE_TYPE(node)) == RECORD_TYPE || TREE_CODE(TREE_TYPE(node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT(CONSTRUCTOR_ELTS(node), ix, field, val) { if (field && is_struct_init) { pp_character(buffer, '.'); dump_generic_node(buffer, field, spc, flags, false); pp_string(buffer, "="); } if (val && TREE_CODE(val) == ADDR_EXPR) if (TREE_CODE(TREE_OPERAND(val, 0)) == FUNCTION_DECL) val = TREE_OPERAND(val, 0); if (val && TREE_CODE(val) == FUNCTION_DECL) dump_decl_name(buffer, val, flags); else dump_generic_node(buffer, val, spc, flags, false); if (ix != VEC_length(constructor_elt, CONSTRUCTOR_ELTS(node)) - 1) { pp_character(buffer, ','); pp_space(buffer); } } pp_character(buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string(buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent(buffer, spc); else { pp_character(buffer, ','); pp_space(buffer); } for (tp = &TREE_OPERAND(node, 1); TREE_CODE(*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND(*tp, 1)) { dump_generic_node(buffer, TREE_OPERAND(*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent(buffer, spc); else { pp_character(buffer, ','); pp_space(buffer); 
} } dump_generic_node(buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string(buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start(node); !tsi_end_p(si); tsi_next(&si)) { if (!first) newline_and_indent(buffer, spc); else first = false; dump_generic_node(buffer, tsi_stmt(si), spc, flags, true); } } break; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: dump_generic_node(buffer, GENERIC_TREE_OPERAND(node, 0), spc, flags, false); pp_space(buffer); pp_character(buffer, '='); if (TREE_CODE(node) == GIMPLE_MODIFY_STMT && MOVE_NONTEMPORAL(node)) pp_string(buffer, "{nt}"); if (TREE_CODE(node) == GIMPLE_MODIFY_STMT) { stmt_ann_t ann; if ((ann = stmt_ann(node)) && ann->has_volatile_ops) pp_string(buffer, "{v}"); } pp_space(buffer); dump_generic_node(buffer, GENERIC_TREE_OPERAND(node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string(buffer, "TARGET_EXPR <"); dump_generic_node(buffer, TARGET_EXPR_SLOT(node), spc, flags, false); pp_character(buffer, ','); pp_space(buffer); dump_generic_node(buffer, TARGET_EXPR_INITIAL(node), spc, flags, false); pp_character(buffer, '>'); break; case DECL_EXPR: print_declaration(buffer, DECL_EXPR_DECL(node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE(node) == NULL || TREE_TYPE(node) == void_type_node) { pp_string(buffer, "if ("); dump_generic_node(buffer, COND_EXPR_COND(node), spc, flags, false); pp_character(buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN(node) && (IS_EMPTY_STMT(COND_EXPR_THEN(node)) || TREE_CODE(COND_EXPR_THEN(node)) == GOTO_EXPR) && COND_EXPR_ELSE(node) && (IS_EMPTY_STMT(COND_EXPR_ELSE(node)) || TREE_CODE(COND_EXPR_ELSE(node)) == GOTO_EXPR)) { pp_space(buffer); dump_generic_node(buffer, COND_EXPR_THEN(node), 0, flags, true); if (!IS_EMPTY_STMT(COND_EXPR_ELSE(node))) { pp_string(buffer, " else "); dump_generic_node(buffer, COND_EXPR_ELSE(node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN(node)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, COND_EXPR_THEN(node), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } /* Output COND_EXPR_ELSE. 
*/ if (COND_EXPR_ELSE(node) && !IS_EMPTY_STMT(COND_EXPR_ELSE(node))) { newline_and_indent(buffer, spc); pp_string(buffer, "else"); newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, COND_EXPR_ELSE(node), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } } is_expr = false; } else { dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_space(buffer); pp_character(buffer, '?'); pp_space(buffer); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_space(buffer); pp_character(buffer, ':'); pp_space(buffer); dump_generic_node(buffer, TREE_OPERAND(node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character(buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS(node)) { pp_newline(buffer); for (op0 = BIND_EXPR_VARS(node); op0; op0 = TREE_CHAIN(op0)) { print_declaration(buffer, op0, spc + 2, flags); pp_newline(buffer); } } newline_and_indent(buffer, spc + 2); dump_generic_node(buffer, BIND_EXPR_BODY(node), spc + 2, flags, true); newline_and_indent(buffer, spc); pp_character(buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name(buffer, node); /* Print parameters. */ pp_space(buffer); pp_character(buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG(arg, iter, node) { dump_generic_node(buffer, arg, spc, flags, false); if (more_call_expr_args_p(&iter)) { pp_character(buffer, ','); pp_space(buffer); } } } if (CALL_EXPR_VA_ARG_PACK(node)) { if (call_expr_nargs(node) > 0) { pp_character(buffer, ','); pp_space(buffer); } pp_string(buffer, "__builtin_va_arg_pack ()"); } pp_character(buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN(node); if (op1) { pp_string(buffer, " [static-chain: "); dump_generic_node(buffer, op1, spc, flags, false); pp_character(buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT(node)) pp_string(buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL(node)) pp_string(buffer, " [tail call]"); break; case STATIC_CHAIN_EXPR: pp_string(buffer, "<<static chain of "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">>"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string(buffer, "<<cleanup_point "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string(buffer, "<PLACEHOLDER_EXPR "); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); pp_character(buffer, '>'); break; /* Binary arithmetic and logic expressions. 
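Note (illustrative): both operands are tested with op_prio (op) <= op_prio (node), so an equal-priority subtree is parenthesized on either side; a MINUS_EXPR nested under another MINUS_EXPR therefore prints as (a - b) - c or a - (b - c), preserving the tree's associativity.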
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol(node); op0 = TREE_OPERAND(node, 0); op1 = TREE_OPERAND(node, 1); /* * When the operands are expressions with less priority, keep * semantics of the tree representation. */ if (op_prio(op0) <= op_prio(node)) { pp_character(buffer, '('); dump_generic_node(buffer, op0, spc, flags, false); pp_character(buffer, ')'); } else dump_generic_node(buffer, op0, spc, flags, false); pp_space(buffer); pp_string(buffer, op); pp_space(buffer); /* * When the operands are expressions with less priority, keep * semantics of the tree representation. */ if (op_prio(op1) <= op_prio(node)) { pp_character(buffer, '('); dump_generic_node(buffer, op1, spc, flags, false); pp_character(buffer, ')'); } else dump_generic_node(buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE(node) == ADDR_EXPR && (TREE_CODE(TREE_OPERAND(node, 0)) == STRING_CST || TREE_CODE(TREE_OPERAND(node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function * pointers. 
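Illustrative: an ADDR_EXPR of the string constant "hello" prints as "hello", and the address of a FUNCTION_DECL foo prints as foo rather than &foo.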
*/ else pp_string(buffer, op_symbol(node)); if (op_prio(TREE_OPERAND(node, 0)) < op_prio(node)) { pp_character(buffer, '('); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, ')'); } else dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); if (TREE_CODE(node) == MISALIGNED_INDIRECT_REF) { pp_string(buffer, "{misalignment: "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_character(buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio(TREE_OPERAND(node, 0)) < op_prio(node)) { pp_character(buffer, '('); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, ')'); } else dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, op_symbol(node)); break; case MIN_EXPR: pp_string(buffer, "MIN_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_character(buffer, '>'); break; case MAX_EXPR: pp_string(buffer, "MAX_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_character(buffer, '>'); break; case ABS_EXPR: pp_string(buffer, "ABS_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, '>'); break; case RANGE_EXPR: NIY; break; case FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: case CONVERT_EXPR: case NOP_EXPR: type = TREE_TYPE(node); op0 = TREE_OPERAND(node, 0); if (type != TREE_TYPE(op0)) { pp_character(buffer, '('); dump_generic_node(buffer, type, spc, flags, false); pp_string(buffer, ") "); } if (op_prio(op0) < op_prio(node)) pp_character(buffer, '('); dump_generic_node(buffer, op0, spc, flags, false); if (op_prio(op0) < op_prio(node)) pp_character(buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string(buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node(buffer, TREE_TYPE(node), spc, flags, false); pp_string(buffer, ">("); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, ')'); break; case NON_LVALUE_EXPR: pp_string(buffer, "NON_LVALUE_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, '>'); break; case SAVE_EXPR: pp_string(buffer, "SAVE_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, '>'); break; case COMPLEX_EXPR: pp_string(buffer, "COMPLEX_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ">"); break; case CONJ_EXPR: pp_string(buffer, "CONJ_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">"); break; case REALPART_EXPR: pp_string(buffer, "REALPART_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">"); break; case IMAGPART_EXPR: pp_string(buffer, "IMAGPART_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">"); break; case VA_ARG_EXPR: pp_string(buffer, "VA_ARG_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string(buffer, "try"); newline_and_indent(buffer, spc + 2); pp_string(buffer, "{"); 
newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_string(buffer, "}"); newline_and_indent(buffer, spc); pp_string(buffer, (TREE_CODE(node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent(buffer, spc + 2); pp_string(buffer, "{"); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_string(buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string(buffer, "catch ("); dump_generic_node(buffer, CATCH_TYPES(node), spc + 2, flags, false); pp_string(buffer, ")"); newline_and_indent(buffer, spc + 2); pp_string(buffer, "{"); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, CATCH_BODY(node), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_string(buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string(buffer, "<<<eh_filter ("); dump_generic_node(buffer, EH_FILTER_TYPES(node), spc + 2, flags, false); pp_string(buffer, ")>>>"); newline_and_indent(buffer, spc + 2); pp_string(buffer, "{"); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, EH_FILTER_FAILURE(node), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_string(buffer, "}"); is_expr = false; break; case CHANGE_DYNAMIC_TYPE_EXPR: pp_string(buffer, "<<<change_dynamic_type ("); dump_generic_node(buffer, CHANGE_DYNAMIC_TYPE_NEW_TYPE(node), spc + 2, flags, false); pp_string(buffer, ") "); dump_generic_node(buffer, CHANGE_DYNAMIC_TYPE_LOCATION(node), spc + 2, flags, false); pp_string(buffer, ")>>>"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND(node, 0); /* If this is for break or continue, don't bother printing it. */ if (DECL_NAME(op0)) { const char *name = IDENTIFIER_POINTER(DECL_NAME(op0)); if (strcmp(name, "break") == 0 || strcmp(name, "continue") == 0) break; } dump_generic_node(buffer, op0, spc, flags, false); pp_character(buffer, ':'); if (DECL_NONLOCAL(op0)) pp_string(buffer, " [non-local]"); break; case EXC_PTR_EXPR: pp_string(buffer, "<<<exception object>>>"); break; case FILTER_EXPR: pp_string(buffer, "<<<filter object>>>"); break; case LOOP_EXPR: pp_string(buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, LOOP_EXPR_BODY(node), spc + 4, flags, true); newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } is_expr = false; break; case RETURN_EXPR: pp_string(buffer, "return"); op0 = TREE_OPERAND(node, 0); if (op0) { pp_space(buffer); if (TREE_CODE(op0) == MODIFY_EXPR || TREE_CODE(op0) == GIMPLE_MODIFY_STMT) dump_generic_node(buffer, GENERIC_TREE_OPERAND(op0, 1), spc, flags, false); else dump_generic_node(buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string(buffer, "if ("); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ") break"); break; case SWITCH_EXPR: pp_string(buffer, "switch ("); dump_generic_node(buffer, SWITCH_COND(node), spc, flags, false); pp_character(buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); if (SWITCH_BODY(node)) { newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, SWITCH_BODY(node), spc + 4, flags, true); } else { tree vec = SWITCH_LABELS(node); size_t i, n = TREE_VEC_LENGTH(vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT(vec, i); newline_and_indent(buffer, spc + 4); 
if (elt) { dump_generic_node(buffer, elt, spc + 4, flags, false); pp_string(buffer, " goto "); dump_generic_node(buffer, CASE_LABEL(elt), spc + 4, flags, true); pp_semicolon(buffer); } else pp_string(buffer, "case ???: goto ???;"); } } newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION(node); if (TREE_CODE(op0) != SSA_NAME && DECL_P(op0) && DECL_NAME(op0)) { const char *name = IDENTIFIER_POINTER(DECL_NAME(op0)); if (strcmp(name, "break") == 0 || strcmp(name, "continue") == 0) { pp_string(buffer, name); break; } } pp_string(buffer, "goto "); dump_generic_node(buffer, op0, spc, flags, false); break; case RESX_EXPR: pp_string(buffer, "resx "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); break; case ASM_EXPR: pp_string(buffer, "__asm__"); if (ASM_VOLATILE_P(node)) pp_string(buffer, " __volatile__"); pp_character(buffer, '('); dump_generic_node(buffer, ASM_STRING(node), spc, flags, false); pp_character(buffer, ':'); dump_generic_node(buffer, ASM_OUTPUTS(node), spc, flags, false); pp_character(buffer, ':'); dump_generic_node(buffer, ASM_INPUTS(node), spc, flags, false); if (ASM_CLOBBERS(node)) { pp_character(buffer, ':'); dump_generic_node(buffer, ASM_CLOBBERS(node), spc, flags, false); } pp_string(buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW(node) && CASE_HIGH(node)) { pp_string(buffer, "case "); dump_generic_node(buffer, CASE_LOW(node), spc, flags, false); pp_string(buffer, " ... "); dump_generic_node(buffer, CASE_HIGH(node), spc, flags, false); } else if (CASE_LOW(node)) { pp_string(buffer, "case "); dump_generic_node(buffer, CASE_LOW(node), spc, flags, false); } else pp_string(buffer, "default "); pp_character(buffer, ':'); break; case OBJ_TYPE_REF: pp_string(buffer, "OBJ_TYPE_REF("); dump_generic_node(buffer, OBJ_TYPE_REF_EXPR(node), spc, flags, false); pp_character(buffer, ';'); dump_generic_node(buffer, OBJ_TYPE_REF_OBJECT(node), spc, flags, false); pp_character(buffer, '-'); pp_character(buffer, '>'); dump_generic_node(buffer, OBJ_TYPE_REF_TOKEN(node), spc, flags, false); pp_character(buffer, ')'); break; case PHI_NODE: { int i; dump_generic_node(buffer, PHI_RESULT(node), spc, flags, false); pp_string(buffer, " = PHI <"); for (i = 0; i < PHI_NUM_ARGS(node); i++) { dump_generic_node(buffer, PHI_ARG_DEF(node, i), spc, flags, false); pp_string(buffer, "("); pp_decimal_int(buffer, PHI_ARG_EDGE(node, i)->src->index); pp_string(buffer, ")"); if (i < PHI_NUM_ARGS(node) - 1) pp_string(buffer, ", "); } pp_string(buffer, ">"); if (stmt_references_memory_p(node) && (flags & TDF_MEMSYMS)) dump_symbols(buffer, STORED_SYMS(node), flags); } break; case SSA_NAME: dump_generic_node(buffer, SSA_NAME_VAR(node), spc, flags, false); pp_string(buffer, "_"); pp_decimal_int(buffer, SSA_NAME_VERSION(node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI(node)) pp_string(buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF(node)) pp_string(buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string(buffer, "WITH_SIZE_EXPR <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ">"); break; case VALUE_HANDLE: pp_printf(buffer, "VH.%d", VALUE_HANDLE_ID(node)); break; case ASSERT_EXPR: pp_string(buffer, "ASSERT_EXPR <"); dump_generic_node(buffer, ASSERT_EXPR_VAR(node), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, ASSERT_EXPR_COND(node), spc, flags, false); 
pp_string(buffer, ">"); break; case SCEV_KNOWN: pp_string(buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string(buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string(buffer, "{"); dump_generic_node(buffer, CHREC_LEFT(node), spc, flags, false); pp_string(buffer, ", +, "); dump_generic_node(buffer, CHREC_RIGHT(node), spc, flags, false); pp_string(buffer, "}_"); dump_generic_node(buffer, CHREC_VAR(node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string(buffer, "REALIGN_LOAD <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 2), spc, flags, false); pp_string(buffer, ">"); break; case VEC_COND_EXPR: pp_string(buffer, " VEC_COND_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " , "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " , "); dump_generic_node(buffer, TREE_OPERAND(node, 2), spc, flags, false); pp_string(buffer, " > "); break; case DOT_PROD_EXPR: pp_string(buffer, " DOT_PROD_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 2), spc, flags, false); pp_string(buffer, " > "); break; case OMP_PARALLEL: pp_string(buffer, "#pragma omp parallel"); dump_omp_clauses(buffer, OMP_PARALLEL_CLAUSES(node), spc, flags); if (OMP_PARALLEL_FN(node)) { pp_string(buffer, " [child fn: "); dump_generic_node(buffer, OMP_PARALLEL_FN(node), spc, flags, false); pp_string(buffer, " ("); if (OMP_PARALLEL_DATA_ARG(node)) dump_generic_node(buffer, OMP_PARALLEL_DATA_ARG(node), spc, flags, false); else pp_string(buffer, "???"); pp_string(buffer, ")]"); } dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY(node)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, OMP_BODY(node), spc + 4, flags, false); newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } is_expr = false; break; case OMP_FOR: pp_string(buffer, "#pragma omp for"); dump_omp_clauses(buffer, OMP_FOR_CLAUSES(node), spc, flags); if (!(flags & TDF_SLIM)) { if (OMP_FOR_PRE_BODY(node)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); spc += 4; newline_and_indent(buffer, spc); dump_generic_node(buffer, OMP_FOR_PRE_BODY(node), spc, flags, false); } newline_and_indent(buffer, spc); pp_string(buffer, "for ("); dump_generic_node(buffer, OMP_FOR_INIT(node), spc, flags, false); pp_string(buffer, "; "); dump_generic_node(buffer, OMP_FOR_COND(node), spc, flags, false); pp_string(buffer, "; "); dump_generic_node(buffer, OMP_FOR_INCR(node), spc, flags, false); pp_string(buffer, ")"); if (OMP_FOR_BODY(node)) { newline_and_indent(buffer, spc + 2); pp_character(buffer, '{'); newline_and_indent(buffer, spc + 4); dump_generic_node(buffer, OMP_FOR_BODY(node), spc + 4, flags, false); newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } if (OMP_FOR_PRE_BODY(node)) { spc -= 4; newline_and_indent(buffer, spc + 2); pp_character(buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string(buffer, "#pragma omp sections"); if (OMP_SECTIONS_CONTROL(node)) { pp_string(buffer, " <"); dump_generic_node(buffer, OMP_SECTIONS_CONTROL(node), spc, flags, 
false); pp_string(buffer, ">"); } dump_omp_clauses(buffer, OMP_SECTIONS_CLAUSES(node), spc, flags); goto dump_omp_body; case OMP_SECTIONS_SWITCH: pp_string(buffer, "OMP_SECTIONS_SWITCH"); is_expr = false; break; case OMP_SECTION: pp_string(buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string(buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string(buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string(buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME(node)) { pp_space(buffer); pp_character(buffer, '('); dump_generic_node(buffer, OMP_CRITICAL_NAME(node), spc, flags, false); pp_character(buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string(buffer, "#pragma omp atomic"); newline_and_indent(buffer, spc + 2); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_space(buffer); pp_character(buffer, '='); pp_space(buffer); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); break; case OMP_ATOMIC_LOAD: pp_string(buffer, "#pragma omp atomic_load"); newline_and_indent(buffer, spc + 2); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_space(buffer); pp_character(buffer, '='); pp_space(buffer); pp_character(buffer, '*'); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); break; case OMP_ATOMIC_STORE: pp_string(buffer, "#pragma omp atomic_store ("); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_character(buffer, ')'); break; case OMP_SINGLE: pp_string(buffer, "#pragma omp single"); dump_omp_clauses(buffer, OMP_SINGLE_CLAUSES(node), spc, flags); goto dump_omp_body; case OMP_RETURN: pp_string(buffer, "OMP_RETURN"); if (OMP_RETURN_NOWAIT(node)) pp_string(buffer, " [nowait]"); is_expr = false; break; case OMP_CONTINUE: pp_string(buffer, "OMP_CONTINUE <"); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " <- "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, ">"); is_expr = false; break; case OMP_CLAUSE: dump_omp_clause(buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string(buffer, " REDUC_MAX_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case REDUC_MIN_EXPR: pp_string(buffer, " REDUC_MIN_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string(buffer, " REDUC_PLUS_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string(buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string(buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string(buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string(buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case 
VEC_UNPACK_FLOAT_HI_EXPR: pp_string(buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string(buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string(buffer, " VEC_PACK_TRUNC_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string(buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string(buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case BLOCK: { tree t; pp_string(buffer, "BLOCK"); if (BLOCK_ABSTRACT(node)) pp_string(buffer, " [abstract]"); if (TREE_ASM_WRITTEN(node)) pp_string(buffer, " [written]"); newline_and_indent(buffer, spc + 2); if (BLOCK_SUPERCONTEXT(node)) { pp_string(buffer, "SUPERCONTEXT: "); if (TREE_CODE(BLOCK_SUPERCONTEXT(node)) == BLOCK) pp_printf(buffer, "BLOCK %p", (void *)BLOCK_SUPERCONTEXT(node)); else dump_generic_node(buffer, BLOCK_SUPERCONTEXT(node), 0, flags, false); newline_and_indent(buffer, spc + 2); } if (BLOCK_SUBBLOCKS(node)) { pp_string(buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS(node); t; t = BLOCK_CHAIN(t)) pp_printf(buffer, "%p ", (void *)t); newline_and_indent(buffer, spc + 2); } if (BLOCK_VARS(node)) { pp_string(buffer, "VARS: "); for (t = BLOCK_VARS(node); t; t = TREE_CHAIN(t)) { dump_generic_node(buffer, t, 0, flags, false); pp_string(buffer, " "); } newline_and_indent(buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN(node)) { pp_string(buffer, "ABSTRACT_ORIGIN: "); if (TREE_CODE(BLOCK_ABSTRACT_ORIGIN(node)) == BLOCK) pp_printf(buffer, "BLOCK %p", (void *)BLOCK_ABSTRACT_ORIGIN(node)); else dump_generic_node(buffer, BLOCK_ABSTRACT_ORIGIN(node), 0, flags, false); newline_and_indent(buffer, spc + 2); } } break; case VEC_EXTRACT_EVEN_EXPR: pp_string(buffer, " VEC_EXTRACT_EVEN_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_EXTRACT_ODD_EXPR: pp_string(buffer, " VEC_EXTRACT_ODD_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_INTERLEAVE_HIGH_EXPR: pp_string(buffer, " VEC_INTERLEAVE_HIGH_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; case VEC_INTERLEAVE_LOW_EXPR: pp_string(buffer, " VEC_INTERLEAVE_LOW_EXPR < "); dump_generic_node(buffer, TREE_OPERAND(node, 0), spc, flags, false); pp_string(buffer, ", "); dump_generic_node(buffer, TREE_OPERAND(node, 1), spc, flags, false); pp_string(buffer, " > "); break; default: NIY; } if (is_stmt && is_expr) 
pp_semicolon(buffer); /* * If we're building a diagnostic, the formatted text will be written * into BUFFER's stream by the caller; otherwise, write it now. */ if (!(flags & TDF_DIAGNOSTIC)) pp_write_text_to_stream(buffer); return spc; } /* Print the declaration of a variable. */ static void print_declaration(pretty_printer * buffer, tree t, int spc, int flags) { INDENT(spc); if (TREE_CODE(t) == TYPE_DECL) pp_string(buffer, "typedef "); if (CODE_CONTAINS_STRUCT(TREE_CODE(t), TS_DECL_WRTL) && DECL_REGISTER(t)) pp_string(buffer, "register "); if (TREE_PUBLIC(t) && DECL_EXTERNAL(t)) pp_string(buffer, "extern "); else if (TREE_STATIC(t)) pp_string(buffer, "static "); /* Print the type and name. */ if (TREE_CODE(TREE_TYPE(t)) == ARRAY_TYPE) { tree tmp; /* Print array's type. */ tmp = TREE_TYPE(t); while (TREE_CODE(TREE_TYPE(tmp)) == ARRAY_TYPE) tmp = TREE_TYPE(tmp); dump_generic_node(buffer, TREE_TYPE(tmp), spc, flags, false); /* Print variable's name. */ pp_space(buffer); dump_generic_node(buffer, t, spc, flags, false); /* Print the dimensions. */ tmp = TREE_TYPE(t); while (TREE_CODE(tmp) == ARRAY_TYPE) { dump_array_domain(buffer, TYPE_DOMAIN(tmp), spc, flags); tmp = TREE_TYPE(tmp); } } else if (TREE_CODE(t) == FUNCTION_DECL) { dump_generic_node(buffer, TREE_TYPE(TREE_TYPE(t)), spc, flags, false); pp_space(buffer); dump_decl_name(buffer, t, flags); dump_function_declaration(buffer, TREE_TYPE(t), spc, flags); } else { /* Print type declaration. */ dump_generic_node(buffer, TREE_TYPE(t), spc, flags, false); /* Print variable's name. */ pp_space(buffer); dump_generic_node(buffer, t, spc, flags, false); } if (TREE_CODE(t) == VAR_DECL && DECL_HARD_REGISTER(t)) { pp_string(buffer, " __asm__ "); pp_character(buffer, '('); dump_generic_node(buffer, DECL_ASSEMBLER_NAME(t), spc, flags, false); pp_character(buffer, ')'); } /* * The initial value of a function serves to determine whether the * function is declared or defined. So the following does not apply to * function nodes. */ if (TREE_CODE(t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL(t)) { pp_space(buffer); pp_character(buffer, '='); pp_space(buffer); dump_generic_node(buffer, DECL_INITIAL(t), spc, flags, false); } } if (TREE_CODE(t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P(t)) { pp_string(buffer, " [value-expr: "); dump_generic_node(buffer, DECL_VALUE_EXPR(t), spc, flags, false); pp_character(buffer, ']'); } pp_character(buffer, ';'); } /* * Prints a structure: name, fields, and methods. FIXME: Still incomplete. */ static void print_struct_decl(pretty_printer * buffer, const_tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME(node)) { INDENT(spc); if (TREE_CODE(node) == RECORD_TYPE) pp_string(buffer, "struct "); else if ((TREE_CODE(node) == UNION_TYPE || TREE_CODE(node) == QUAL_UNION_TYPE)) pp_string(buffer, "union "); dump_generic_node(buffer, TYPE_NAME(node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline(buffer); INDENT(spc); pp_character(buffer, '{'); pp_newline(buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS(node); while (tmp) { /* Avoid printing the structure recursively. */ /* * FIXME: Not implemented correctly..., what about the case when * we have a cycle in the contain graph? ... Maybe this could be * solved by looking at the scope in which the structure was * declared. 
*/ if (TREE_TYPE(tmp) != node || (TREE_CODE(TREE_TYPE(tmp)) == POINTER_TYPE && TREE_TYPE(TREE_TYPE(tmp)) != node)) { print_declaration(buffer, tmp, spc + 2, flags); pp_newline(buffer); } tmp = TREE_CHAIN(tmp); } } INDENT(spc); pp_character(buffer, '}'); } /*
 * Return the priority of the operator OP.
 *
 * From lowest to highest precedence, with left-to-right (L-R) or
 * right-to-left (R-L) associativity:
 *
 *    1  [L-R]  ,
 *    2  [R-L]  = += -= *= /= %= &= ^= |= <<= >>=
 *    3  [R-L]  ?:
 *    4  [L-R]  ||
 *    5  [L-R]  &&
 *    6  [L-R]  |
 *    7  [L-R]  ^
 *    8  [L-R]  &
 *    9  [L-R]  == !=
 *   10  [L-R]  < <= > >=
 *   11  [L-R]  << >>
 *   12  [L-R]  + -
 *   13  [L-R]  * / %
 *   14  [R-L]  ! ~ ++ -- + - * & (type) sizeof
 *   15  [L-R]  fn() [] -> .
 *
 * Unary +, - and * have higher precedence than the corresponding binary
 * operators.
 */ static int op_prio(const_tree op) { if (op == NULL) return 9999; switch (TREE_CODE(op)) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: return 12; case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: case NOP_EXPR: case CONVERT_EXPR: case FIX_TRUNC_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case VEC_UNPACK_HI_EXPR: case VEC_UNPACK_LO_EXPR: case VEC_UNPACK_FLOAT_HI_EXPR: case VEC_UNPACK_FLOAT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: return 16; case SAVE_EXPR: case NON_LVALUE_EXPR: return op_prio(TREE_OPERAND(op, 0)); default: /* * Return an arbitrarily high precedence to avoid surrounding single * VAR_DECLs in ()s. */ return 9999; } } /* Return the symbol associated with operator CODE. 
*/ const char * op_symbol_code(enum tree_code code) { switch (code) { case MODIFY_EXPR: case GIMPLE_MODIFY_STMT: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case POINTER_PLUS_EXPR: return "+"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case ALIGN_INDIRECT_REF: return "A*"; case MISALIGNED_INDIRECT_REF: return "M*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol(const_tree op) { return op_symbol_code(TREE_CODE(op)); } /* Prints the name of a CALL_EXPR. */ static void print_call_name(pretty_printer * buffer, const_tree node) { tree op0; gcc_assert(TREE_CODE(node) == CALL_EXPR); op0 = CALL_EXPR_FN(node); if (TREE_CODE(op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND(op0, 0); switch (TREE_CODE(op0)) { case VAR_DECL: case PARM_DECL: dump_function_name(buffer, op0); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: dump_generic_node(buffer, TREE_OPERAND(op0, 0), 0, 0, false); break; case COND_EXPR: pp_string(buffer, "("); dump_generic_node(buffer, TREE_OPERAND(op0, 0), 0, 0, false); pp_string(buffer, ") ? "); dump_generic_node(buffer, TREE_OPERAND(op0, 1), 0, 0, false); pp_string(buffer, " : "); dump_generic_node(buffer, TREE_OPERAND(op0, 2), 0, 0, false); break; case COMPONENT_REF: /* The function is a pointer contained in a structure. */ if (TREE_CODE(TREE_OPERAND(op0, 0)) == INDIRECT_REF || TREE_CODE(TREE_OPERAND(op0, 0)) == VAR_DECL) dump_function_name(buffer, TREE_OPERAND(op0, 1)); else dump_generic_node(buffer, TREE_OPERAND(op0, 0), 0, 0, false); /* * else We can have several levels of structures and a function * pointer inside. This is not implemented yet... 
*/ /* NIY; */ break; case ARRAY_REF: if (TREE_CODE(TREE_OPERAND(op0, 0)) == VAR_DECL) dump_function_name(buffer, TREE_OPERAND(op0, 0)); else dump_generic_node(buffer, op0, 0, 0, false); break; case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node(buffer, op0, 0, 0, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... */ static void pretty_print_string(pretty_printer * buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string(buffer, "\\b"); break; case '\f': pp_string(buffer, "\\f"); break; case '\n': pp_string(buffer, "\\n"); break; case '\r': pp_string(buffer, "\\r"); break; case '\t': pp_string(buffer, "\\t"); break; case '\v': pp_string(buffer, "\\v"); break; case '\\': pp_string(buffer, "\\\\"); break; case '\"': pp_string(buffer, "\\\""); break; case '\'': pp_string(buffer, "\\'"); break; /* No need to handle \0; the loop terminates on \0. */ case '\1': pp_string(buffer, "\\1"); break; case '\2': pp_string(buffer, "\\2"); break; case '\3': pp_string(buffer, "\\3"); break; case '\4': pp_string(buffer, "\\4"); break; case '\5': pp_string(buffer, "\\5"); break; case '\6': pp_string(buffer, "\\6"); break; case '\7': pp_string(buffer, "\\7"); break; default: pp_character(buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print(FILE * file) { if (!initialized) { pp_construct(&buffer, /* prefix */ NULL, /* line-width */ 0); pp_needs_newline(&buffer) = true; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent(pretty_printer * buffer, int spc) { pp_newline(buffer); INDENT(spc); } static void dump_vops(pretty_printer * buffer, tree stmt, int spc, int flags) { struct voptype_d *vdefs; struct voptype_d *vuses; int i, n; if (!ssa_operands_active() || !stmt_references_memory_p(stmt)) return; /* * Even if the statement doesn't have virtual operators yet, it may * contain symbol information (this happens before aliases have been * computed). */ if ((flags & TDF_MEMSYMS) && VUSE_OPS(stmt) == NULL && VDEF_OPS(stmt) == NULL) { if (LOADED_SYMS(stmt)) { pp_string(buffer, "# LOADS: "); dump_symbols(buffer, LOADED_SYMS(stmt), flags); newline_and_indent(buffer, spc); } if (STORED_SYMS(stmt)) { pp_string(buffer, "# STORES: "); dump_symbols(buffer, STORED_SYMS(stmt), flags); newline_and_indent(buffer, spc); } return; } vuses = VUSE_OPS(stmt); while (vuses) { pp_string(buffer, "# VUSE <"); n = VUSE_NUM(vuses); for (i = 0; i < n; i++) { dump_generic_node(buffer, VUSE_OP(vuses, i), spc + 2, flags, false); if (i < n - 1) pp_string(buffer, ", "); } pp_string(buffer, ">"); if (flags & TDF_MEMSYMS) dump_symbols(buffer, LOADED_SYMS(stmt), flags); newline_and_indent(buffer, spc); vuses = vuses->next; } vdefs = VDEF_OPS(stmt); while (vdefs) { pp_string(buffer, "# "); dump_generic_node(buffer, VDEF_RESULT(vdefs), spc + 2, flags, false); pp_string(buffer, " = VDEF <"); n = VDEF_NUM(vdefs); for (i = 0; i < n; i++) { dump_generic_node(buffer, VDEF_OP(vdefs, i), spc + 2, flags, 0); if (i < n - 1) pp_string(buffer, ", "); } pp_string(buffer, ">"); if ((flags & TDF_MEMSYMS) && vdefs->next == NULL) dump_symbols(buffer, STORED_SYMS(stmt), flags); newline_and_indent(buffer, spc); vdefs = vdefs->next; } } /* * Dumps basic block BB to FILE with details described by FLAGS and indented * by INDENT spaces. 
*/ void dump_generic_bb(FILE * file, basic_block bb, int indent, int flags) { maybe_init_pretty_print(file); dump_generic_bb_buff(&buffer, bb, indent, flags); pp_flush(&buffer); } /* * Dumps header of basic block BB to buffer BUFFER indented by INDENT spaces * and details described by flags. */ static void dump_bb_header(pretty_printer * buffer, basic_block bb, int indent, int flags) { edge e; tree stmt; edge_iterator ei; if (flags & TDF_BLOCKS) { INDENT(indent); pp_string(buffer, "# BLOCK "); pp_decimal_int(buffer, bb->index); if (bb->frequency) { pp_string(buffer, " freq:"); pp_decimal_int(buffer, bb->frequency); } if (bb->count) { pp_string(buffer, " count:"); pp_widest_integer(buffer, bb->count); } if (flags & TDF_LINENO) { block_stmt_iterator bsi; for (bsi = bsi_start(bb); !bsi_end_p(bsi); bsi_next(&bsi)) if (get_lineno(bsi_stmt(bsi)) != -1) { pp_string(buffer, ", starting at line "); pp_decimal_int(buffer, get_lineno(bsi_stmt(bsi))); break; } } newline_and_indent(buffer, indent); pp_string(buffer, "# PRED:"); pp_write_text_to_stream(buffer); FOR_EACH_EDGE(e, ei, bb->preds) if (flags & TDF_SLIM) { pp_string(buffer, " "); if (e->src == ENTRY_BLOCK_PTR) pp_string(buffer, "ENTRY"); else pp_decimal_int(buffer, e->src->index); } else dump_edge_info(buffer->buffer->stream, e, 0); pp_newline(buffer); } else { stmt = first_stmt(bb); if (!stmt || TREE_CODE(stmt) != LABEL_EXPR) { INDENT(indent - 2); pp_string(buffer, "<bb "); pp_decimal_int(buffer, bb->index); pp_string(buffer, ">:"); pp_newline(buffer); } } pp_write_text_to_stream(buffer); check_bb_profile(bb, buffer->buffer->stream); } /* * Dumps end of basic block BB to buffer BUFFER indented by INDENT spaces. */ static void dump_bb_end(pretty_printer * buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; INDENT(indent); pp_string(buffer, "# SUCC:"); pp_write_text_to_stream(buffer); FOR_EACH_EDGE(e, ei, bb->succs) if (flags & TDF_SLIM) { pp_string(buffer, " "); if (e->dest == EXIT_BLOCK_PTR) pp_string(buffer, "EXIT"); else pp_decimal_int(buffer, e->dest->index); } else dump_edge_info(buffer->buffer->stream, e, 1); pp_newline(buffer); } /* * Dump PHI nodes of basic block BB to BUFFER with details described by FLAGS * and indented by INDENT spaces. */ static void dump_phi_nodes(pretty_printer * buffer, basic_block bb, int indent, int flags) { tree phi = phi_nodes(bb); if (!phi) return; for (; phi; phi = PHI_CHAIN(phi)) { if (is_gimple_reg(PHI_RESULT(phi)) || (flags & TDF_VOPS)) { INDENT(indent); pp_string(buffer, "# "); dump_generic_node(buffer, phi, indent, flags, false); pp_newline(buffer); } } } /* * Dump jump to basic block BB that is represented implicitly in the cfg to * BUFFER. */ static void pp_cfg_jump(pretty_printer * buffer, basic_block bb) { tree stmt; stmt = first_stmt(bb); pp_string(buffer, "goto <bb "); pp_decimal_int(buffer, bb->index); pp_string(buffer, ">"); if (stmt && TREE_CODE(stmt) == LABEL_EXPR) { pp_string(buffer, " ("); dump_generic_node(buffer, LABEL_EXPR_LABEL(stmt), 0, 0, false); pp_string(buffer, ")"); } pp_semicolon(buffer); } /* * Dump edges represented implicitly in basic block BB to BUFFER, indented by * INDENT spaces, with details given by FLAGS. */ static void dump_implicit_edges(pretty_printer * buffer, basic_block bb, int indent, int flags) { edge e; edge_iterator ei; tree stmt; stmt = last_stmt(bb); if (stmt && TREE_CODE(stmt) == COND_EXPR) { edge true_edge, false_edge; /* * When we are emitting the code or changing CFG, it is possible that * the edges are not yet created. 
When we are using debug_bb in such * a situation, we do not want it to crash. */ if (EDGE_COUNT(bb->succs) != 2) return; extract_true_false_edges_from_block(bb, &true_edge, &false_edge); INDENT(indent + 2); pp_cfg_jump(buffer, true_edge->dest); newline_and_indent(buffer, indent); pp_string(buffer, "else"); newline_and_indent(buffer, indent + 2); pp_cfg_jump(buffer, false_edge->dest); pp_newline(buffer); return; } /* * If there is a fallthru edge, we may need to add an artificial goto to * the dump. */ FOR_EACH_EDGE(e, ei, bb->succs) if (e->flags & EDGE_FALLTHRU) break; if (e && e->dest != bb->next_bb) { INDENT(indent); if ((flags & TDF_LINENO) #ifdef USE_MAPPED_LOCATION && e->goto_locus != UNKNOWN_LOCATION #else && e->goto_locus #endif ) { expanded_location goto_xloc; #ifdef USE_MAPPED_LOCATION goto_xloc = expand_location(e->goto_locus); #else goto_xloc = *e->goto_locus; #endif pp_character(buffer, '['); if (goto_xloc.file) { pp_string(buffer, goto_xloc.file); pp_string(buffer, " : "); } pp_decimal_int(buffer, goto_xloc.line); pp_string(buffer, "] "); } pp_cfg_jump(buffer, e->dest); pp_newline(buffer); } } /* * Dumps basic block BB to buffer BUFFER with details described by FLAGS and * indented by INDENT spaces. */ static void dump_generic_bb_buff(pretty_printer * buffer, basic_block bb, int indent, int flags) { block_stmt_iterator bsi; tree stmt; int label_indent = indent - 2; if (label_indent < 0) label_indent = 0; dump_bb_header(buffer, bb, indent, flags); dump_phi_nodes(buffer, bb, indent, flags); for (bsi = bsi_start(bb); !bsi_end_p(bsi); bsi_next(&bsi)) { int curr_indent; stmt = bsi_stmt(bsi); curr_indent = TREE_CODE(stmt) == LABEL_EXPR ? label_indent : indent; INDENT(curr_indent); dump_generic_node(buffer, stmt, curr_indent, flags, true); pp_newline(buffer); dump_histograms_for_stmt(cfun, buffer->buffer->stream, stmt); } dump_implicit_edges(buffer, bb, indent, flags); if (flags & TDF_BLOCKS) dump_bb_end(buffer, bb, indent, flags); }
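The op_prio() precedence table in this pretty-printer is what lets dump_generic_node decide where parentheses are needed: an operand is wrapped when its operator binds less tightly than the enclosing operator. Below is a minimal standalone sketch of that idea; the Expr struct, the priority values, and print_expr() are illustrative stand-ins rather than GCC APIs, and the real printer also has to account for associativity.

/* Sketch: precedence-driven parenthesization in the spirit of op_prio().
 * Everything here (Expr, print_expr) is hypothetical, not a GCC API. */
#include <stdio.h>

typedef struct Expr {
    char op;                  /* '+', '*', ... or 0 for a leaf */
    int prio;                 /* e.g. 12 for + -, 13 for * / %, 9999 for leaves */
    struct Expr *lhs, *rhs;
    int value;                /* leaf payload */
} Expr;

static void print_expr(const Expr *e, int parent_prio)
{
    if (e->op == 0) {         /* leaves never need parentheses */
        printf("%d", e->value);
        return;
    }
    /* Parenthesize when this operator binds less tightly than its context. */
    int need_parens = e->prio < parent_prio;
    if (need_parens) putchar('(');
    print_expr(e->lhs, e->prio);
    printf(" %c ", e->op);
    print_expr(e->rhs, e->prio);
    if (need_parens) putchar(')');
}

int main(void)
{
    Expr a = {0, 9999, NULL, NULL, 1};
    Expr b = {0, 9999, NULL, NULL, 2};
    Expr c = {0, 9999, NULL, NULL, 3};
    Expr sum = {'+', 12, &a, &b, 0};
    Expr prod = {'*', 13, &sum, &c, 0};
    print_expr(&prod, 0);     /* prints "(1 + 2) * 3" */
    putchar('\n');
    return 0;
}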
GB_unaryop__identity_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_fp64 // op(A') function: GB_tran__identity_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_fp64 // op(A') function: GB_tran__identity_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_bool_fp64 // op(A') function: GB_tran__identity_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_bool_fp64 ( bool *Cx, // Cx and Ax may be aliased double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
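All three columns above instantiate the same generated-kernel pattern: GB_GETA loads aij, GB_CASTING typecasts it, and GB_OP applies the operator, so the loop body is assembled entirely from macros. A self-contained sketch of that composition for this file's bool = (bool) double identity case follows; the macro and function names here are stand-ins that mimic, but do not include, the GraphBLAS headers.

/* Sketch of the GB_GETA / GB_CASTING / GB_OP macro composition used by the
 * generated kernel above. GETA, CASTING, OP and identity_bool_fp64 are
 * hypothetical stand-ins, not the real GraphBLAS definitions. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GETA(aij,Ax,p)   double aij = Ax [p]       /* aij = Ax [p] */
#define CASTING(z,aij)   bool z = (bool) aij       /* typecast */
#define OP(z,x)          z = x                     /* identity operator */

static void identity_bool_fp64 (bool *Cx, const double *Ax, int64_t anz)
{
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GETA (aij, Ax, p) ;
        CASTING (z, aij) ;
        OP (Cx [p], z) ;
    }
}

int main (void)
{
    double A [4] = { 0.0, 0.5, -3.0, 0.0 } ;
    bool C [4] ;
    identity_bool_fp64 (C, A, 4) ;
    for (int i = 0 ; i < 4 ; i++) printf ("%d ", (int) C [i]) ;   /* 0 1 1 0 */
    printf ("\n") ;
    return 0 ;
}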
SpatialFractionalMaxPooling.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialFractionalMaxPooling.c" #else static int64_t* THNN_(SpatialFractionalMaxPooling_generateIntervals)( real sample, int64_t inputSize, int64_t outputSize, int poolSize) { real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); int64_t* sequence = (int64_t*) THAlloc(sizeof(int64_t) * outputSize); int64_t i; for (i = 0; i < outputSize - 1; ++i) { sequence[i] = (int64_t) ((i + sample) * alpha) - (int64_t) (sample * alpha); } sequence[outputSize - 1] = inputSize - poolSize; return sequence; } static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( real* input, real* output, THIndex_t* indices, real* randomSamples, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH, int poolSizeW, int poolSizeH) { int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; ++plane) { /* each plane contains 2 random samples, one for W and one for H */ real* randomSamplesForPlane = randomSamples + plane * 2; /* Generate interval sequence */ int64_t* sequenceW = THNN_(SpatialFractionalMaxPooling_generateIntervals)( randomSamplesForPlane[0], inputW, outputW, poolSizeW); int64_t* sequenceH = THNN_(SpatialFractionalMaxPooling_generateIntervals)( randomSamplesForPlane[1], inputH, outputH, poolSizeH); /* loop over output */ int64_t h, w; real* inputForPlane = input + plane * inputW * inputH; real* outputForPlane = output + plane * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputW * outputH; for (h = 0; h < outputH; ++h) { int64_t inputHStart = sequenceH[h]; for (w = 0; w < outputW; ++w) { int64_t inputWStart = sequenceW[w]; real maxVal = -THInf; int64_t maxIndex = -1; int64_t h2, w2; for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) { for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) { THAssert(h2 >= 0 && h2 < inputH); THAssert(w2 >= 0 && w2 < inputW); int64_t planeIndex = h2 * inputW + w2; real val = inputForPlane[planeIndex]; if (val > maxVal) { maxVal = val; maxIndex = planeIndex; } } } THAssert(maxVal != -THInf); THAssert(maxIndex != -1); outputForPlane[h * outputW + w] = maxVal; /* +1 to lua index */ indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE; } } THFree(sequenceW); THFree(sequenceH); } } void THNN_(SpatialFractionalMaxPooling_updateOutput)( THNNState *state, THTensor *input, THTensor *output, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor *indices, THTensor *randomSamples) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension)(input); THNN_ARGCHECK(numInputDims == 3 || numInputDims == 4, 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); if (numInputDims == 4) { numBatch = THTensor_(size)(input, 0); planeDim++; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size)(input, planeDim); int64_t inputH = THTensor_(size)(input, heightDim); int64_t inputW = THTensor_(size)(input, widthDim); THArgCheck(outputH + poolSizeH - 1 <= inputH, 7, "poolSizeH (%d) too large relative to input height (%d)", poolSizeH, inputH); THArgCheck(outputW + poolSizeW - 1 <= inputW, 6, "poolSizeW (%d) too large relative to input width (%d)", poolSizeW, inputW); /* get contiguous input */ input = THTensor_(newContiguous)(input); if (numInputDims == 3) { /* resize output */ THTensor_(resize3d)(output, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ 
THIndexTensor_(resize3d)(indices, numPlanes, outputH, outputW); THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( THTensor_(data)(input), THTensor_(data)(output), THIndexTensor_(data)(indices), THTensor_(data)(randomSamples), numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } else { THTensor_(resize4d)(output, numBatch, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ THIndexTensor_(resize4d)(indices, numBatch, numPlanes, outputH, outputW); int64_t batch; #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateOutput_frame)( THTensor_(data)(input) + batch * numPlanes * inputH * inputW, THTensor_(data)(output) + batch * numPlanes * outputH * outputW, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, THTensor_(data)(randomSamples) + batch * numPlanes * 2, numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } } /* cleanup */ THTensor_(free)(input); } static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( real* gradInput, real* gradOutput, THIndex_t* indices, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH) { int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; plane++) { real* gradInputForPlane = gradInput + plane * inputW * inputH; real* gradOutputForPlane = gradOutput + plane * outputW * outputH; THIndex_t* indicesForPlane = indices + plane * outputW * outputH; int64_t h, w; for (h = 0; h < outputH; ++h) { for (w = 0; w < outputW; ++w) { int64_t outputIndex = h * outputW + w; int64_t index = indicesForPlane[outputIndex] - TH_INDEX_BASE; THAssert(index >= 0 && index < inputW * inputH); gradInputForPlane[index] += gradOutputForPlane[outputIndex]; } } } } void THNN_(SpatialFractionalMaxPooling_updateGradInput)( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor *indices) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension)(input); if (numInputDims == 4) { numBatch = THTensor_(size)(input, 0); planeDim = 1; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size)(input, planeDim); int64_t inputH = THTensor_(size)(input, heightDim); int64_t inputW = THTensor_(size)(input, widthDim); THArgCheck(outputW == THTensor_(size)(gradOutput, widthDim), 3, "gradOutput width unexpected"); THArgCheck(outputH == THTensor_(size)(gradOutput, heightDim), 3, "gradOutput height unexpected"); /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous)(gradOutput); /* resize */ THTensor_(resizeAs)(gradInput, input); THTensor_(zero)(gradInput); /* backprop */ if (numInputDims == 3) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( THTensor_(data)(gradInput), THTensor_(data)(gradOutput), THIndexTensor_(data)(indices), numPlanes, inputW, inputH, outputW, outputH); } else { int64_t batch; #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame)( THTensor_(data)(gradInput) + batch * numPlanes * inputH * inputW, THTensor_(data)(gradOutput) + batch * numPlanes * outputH * outputW, THIndexTensor_(data)(indices) + batch * numPlanes * outputH * outputW, numPlanes, inputW, inputH, outputW, outputH); } } /* cleanup */ THTensor_(free)(gradOutput); } #endif
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialFractionalMaxPooling.c" #else static int64_t * THNN_(SpatialFractionalMaxPooling_generateIntervals) ( real sample, int64_t inputSize, int64_t outputSize, int poolSize) { real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); int64_t *sequence = (int64_t *) THAlloc(sizeof(int64_t) * outputSize); int64_t i; for (i = 0; i < outputSize - 1; ++i) { sequence[i] = (int64_t) ((i + sample) * alpha) - (int64_t) (sample * alpha); } sequence[outputSize - 1] = inputSize - poolSize; return sequence; } static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( real * input, real * output, THIndex_t * indices, real * randomSamples, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH, int poolSizeW, int poolSizeH) { int64_t plane; for (plane = 0; plane < numPlanes; ++plane) { /* each plane contains 2 random samples, one for W and one for H */ real *randomSamplesForPlane = randomSamples + plane * 2; /* Generate interval sequence */ int64_t *sequenceW = THNN_(SpatialFractionalMaxPooling_generateIntervals) ( randomSamplesForPlane[0], inputW, outputW, poolSizeW); int64_t *sequenceH = THNN_(SpatialFractionalMaxPooling_generateIntervals) ( randomSamplesForPlane[1], inputH, outputH, poolSizeH); /* loop over output */ int64_t h, w; real *inputForPlane = input + plane * inputW * inputH; real *outputForPlane = output + plane * outputW * outputH; THIndex_t *indicesForPlane = indices + plane * outputW * outputH; for (h = 0; h < outputH; ++h) { int64_t inputHStart = sequenceH[h]; for (w = 0; w < outputW; ++w) { int64_t inputWStart = sequenceW[w]; real maxVal = -THInf; int64_t maxIndex = -1; int64_t h2, w2; for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) { for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) { THAssert(h2 >= 0 && h2 < inputH); THAssert(w2 >= 0 && w2 < inputW); int64_t planeIndex = h2 * inputW + w2; real val = inputForPlane[planeIndex]; if (val > maxVal) { maxVal = val; maxIndex = planeIndex; } } } THAssert(maxVal != -THInf); THAssert(maxIndex != -1); outputForPlane[h * outputW + w] = maxVal; /* +1 to lua index */ indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE; } } THFree(sequenceW); THFree(sequenceH); } } void THNN_(SpatialFractionalMaxPooling_updateOutput) ( THNNState * state, THTensor * input, THTensor * output, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor * indices, THTensor * randomSamples) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension) (input); THNN_ARGCHECK(numInputDims == 3 || numInputDims == 4, 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); if (numInputDims == 4) { numBatch = THTensor_(size) (input, 0); planeDim++; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size) (input, planeDim); int64_t inputH = THTensor_(size) (input, heightDim); int64_t inputW = THTensor_(size) (input, widthDim); THArgCheck(outputH + poolSizeH - 1 <= inputH, 7, "poolSizeH (%d) too large relative to input height (%d)", poolSizeH, inputH); THArgCheck(outputW + poolSizeW - 1 <= inputW, 6, "poolSizeW (%d) too large relative to input width (%d)", poolSizeW, inputW); /* get contiguous input */ input = THTensor_(newContiguous) (input); if (numInputDims == 3) { /* resize output */ THTensor_(resize3d) (output, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ THIndexTensor_(resize3d) 
(indices, numPlanes, outputH, outputW); THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( THTensor_(data) (input), THTensor_(data) (output), THIndexTensor_(data) (indices), THTensor_(data) (randomSamples), numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } else { THTensor_(resize4d) (output, numBatch, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ THIndexTensor_(resize4d) (indices, numBatch, numPlanes, outputH, outputW); int64_t batch; for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( THTensor_(data) (input) + batch * numPlanes * inputH * inputW, THTensor_(data) (output) + batch * numPlanes * outputH * outputW, THIndexTensor_(data) (indices) + batch * numPlanes * outputH * outputW, THTensor_(data) (randomSamples) + batch * numPlanes * 2, numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } } /* cleanup */ THTensor_(free) (input); } static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( real * gradInput, real * gradOutput, THIndex_t * indices, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH) { int64_t plane; for (plane = 0; plane < numPlanes; plane++) { real *gradInputForPlane = gradInput + plane * inputW * inputH; real *gradOutputForPlane = gradOutput + plane * outputW * outputH; THIndex_t *indicesForPlane = indices + plane * outputW * outputH; int64_t h, w; for (h = 0; h < outputH; ++h) { for (w = 0; w < outputW; ++w) { int64_t outputIndex = h * outputW + w; int64_t index = indicesForPlane[outputIndex] - TH_INDEX_BASE; THAssert(index >= 0 && index < inputW * inputH); gradInputForPlane[index] += gradOutputForPlane[outputIndex]; } } } } void THNN_(SpatialFractionalMaxPooling_updateGradInput) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor * indices) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension) (input); if (numInputDims == 4) { numBatch = THTensor_(size) (input, 0); planeDim = 1; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size) (input, planeDim); int64_t inputH = THTensor_(size) (input, heightDim); int64_t inputW = THTensor_(size) (input, widthDim); THArgCheck(outputW == THTensor_(size) (gradOutput, widthDim), 3, "gradOutput width unexpected"); THArgCheck(outputH == THTensor_(size) (gradOutput, heightDim), 3, "gradOutput height unexpected"); /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous) (gradOutput); /* resize */ THTensor_(resizeAs) (gradInput, input); THTensor_(zero) (gradInput); /* backprop */ if (numInputDims == 3) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( THTensor_(data) (gradInput), THTensor_(data) (gradOutput), THIndexTensor_(data) (indices), numPlanes, inputW, inputH, outputW, outputH); } else { int64_t batch; for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( THTensor_(data) (gradInput) + batch * numPlanes * inputH * inputW, THTensor_(data) (gradOutput) + batch * numPlanes * outputH * outputW, THIndexTensor_(data) (indices) + batch * numPlanes * outputH * outputW, numPlanes, inputW, inputH, outputW, outputH); } } /* cleanup */ THTensor_(free) (gradOutput); } #endif
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialFractionalMaxPooling.c" #else static int64_t * THNN_(SpatialFractionalMaxPooling_generateIntervals) ( real sample, int64_t inputSize, int64_t outputSize, int poolSize) { real alpha = (real) (inputSize - poolSize) / (real) (outputSize - 1); int64_t *sequence = (int64_t *) THAlloc(sizeof(int64_t) * outputSize); int64_t i; for (i = 0; i < outputSize - 1; ++i) { sequence[i] = (int64_t) ((i + sample) * alpha) - (int64_t) (sample * alpha); } sequence[outputSize - 1] = inputSize - poolSize; return sequence; } static void THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( real * input, real * output, THIndex_t * indices, real * randomSamples, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH, int poolSizeW, int poolSizeH) { int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; ++plane) { /* each plane contains 2 random samples, one for W and one for H */ real *randomSamplesForPlane = randomSamples + plane * 2; /* Generate interval sequence */ int64_t *sequenceW = THNN_(SpatialFractionalMaxPooling_generateIntervals) ( randomSamplesForPlane[0], inputW, outputW, poolSizeW); int64_t *sequenceH = THNN_(SpatialFractionalMaxPooling_generateIntervals) ( randomSamplesForPlane[1], inputH, outputH, poolSizeH); /* loop over output */ int64_t h, w; real *inputForPlane = input + plane * inputW * inputH; real *outputForPlane = output + plane * outputW * outputH; THIndex_t *indicesForPlane = indices + plane * outputW * outputH; for (h = 0; h < outputH; ++h) { int64_t inputHStart = sequenceH[h]; for (w = 0; w < outputW; ++w) { int64_t inputWStart = sequenceW[w]; real maxVal = -THInf; int64_t maxIndex = -1; int64_t h2, w2; for (h2 = inputHStart; h2 < inputHStart + poolSizeH; ++h2) { for (w2 = inputWStart; w2 < inputWStart + poolSizeW; ++w2) { THAssert(h2 >= 0 && h2 < inputH); THAssert(w2 >= 0 && w2 < inputW); int64_t planeIndex = h2 * inputW + w2; real val = inputForPlane[planeIndex]; if (val > maxVal) { maxVal = val; maxIndex = planeIndex; } } } THAssert(maxVal != -THInf); THAssert(maxIndex != -1); outputForPlane[h * outputW + w] = maxVal; /* +1 to lua index */ indicesForPlane[h * outputW + w] = maxIndex + TH_INDEX_BASE; } } THFree(sequenceW); THFree(sequenceH); } } void THNN_(SpatialFractionalMaxPooling_updateOutput) ( THNNState * state, THTensor * input, THTensor * output, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor * indices, THTensor * randomSamples) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension) (input); THNN_ARGCHECK(numInputDims == 3 || numInputDims == 4, 2, input, "3D or 4D (batch mode) tensor expected for input, but got: %s"); if (numInputDims == 4) { numBatch = THTensor_(size) (input, 0); planeDim++; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size) (input, planeDim); int64_t inputH = THTensor_(size) (input, heightDim); int64_t inputW = THTensor_(size) (input, widthDim); THArgCheck(outputH + poolSizeH - 1 <= inputH, 7, "poolSizeH (%d) too large relative to input height (%d)", poolSizeH, inputH); THArgCheck(outputW + poolSizeW - 1 <= inputW, 6, "poolSizeW (%d) too large relative to input width (%d)", poolSizeW, inputW); /* get contiguous input */ input = THTensor_(newContiguous) (input); if (numInputDims == 3) { /* resize output */ THTensor_(resize3d) (output, numPlanes, outputH, outputW); /* indices will contain the locations for each output 
point */ THIndexTensor_(resize3d) (indices, numPlanes, outputH, outputW); THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( THTensor_(data) (input), THTensor_(data) (output), THIndexTensor_(data) (indices), THTensor_(data) (randomSamples), numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } else { THTensor_(resize4d) (output, numBatch, numPlanes, outputH, outputW); /* indices will contain the locations for each output point */ THIndexTensor_(resize4d) (indices, numBatch, numPlanes, outputH, outputW); int64_t batch; #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateOutput_frame) ( THTensor_(data) (input) + batch * numPlanes * inputH * inputW, THTensor_(data) (output) + batch * numPlanes * outputH * outputW, THIndexTensor_(data) (indices) + batch * numPlanes * outputH * outputW, THTensor_(data) (randomSamples) + batch * numPlanes * 2, numPlanes, inputW, inputH, outputW, outputH, poolSizeW, poolSizeH); } } /* cleanup */ THTensor_(free) (input); } static void THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( real * gradInput, real * gradOutput, THIndex_t * indices, int64_t numPlanes, int64_t inputW, int64_t inputH, int64_t outputW, int64_t outputH) { int64_t plane; #pragma omp parallel for private(plane) for (plane = 0; plane < numPlanes; plane++) { real *gradInputForPlane = gradInput + plane * inputW * inputH; real *gradOutputForPlane = gradOutput + plane * outputW * outputH; THIndex_t *indicesForPlane = indices + plane * outputW * outputH; int64_t h, w; for (h = 0; h < outputH; ++h) { for (w = 0; w < outputW; ++w) { int64_t outputIndex = h * outputW + w; int64_t index = indicesForPlane[outputIndex] - TH_INDEX_BASE; THAssert(index >= 0 && index < inputW * inputH); gradInputForPlane[index] += gradOutputForPlane[outputIndex]; } } } } void THNN_(SpatialFractionalMaxPooling_updateGradInput) ( THNNState * state, THTensor * input, THTensor * gradOutput, THTensor * gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, THIndexTensor * indices) { int64_t numBatch = 1; int planeDim = 0; int heightDim = 1; int widthDim = 2; int64_t numInputDims = THTensor_(nDimension) (input); if (numInputDims == 4) { numBatch = THTensor_(size) (input, 0); planeDim = 1; heightDim++; widthDim++; } /* sizes */ int64_t numPlanes = THTensor_(size) (input, planeDim); int64_t inputH = THTensor_(size) (input, heightDim); int64_t inputW = THTensor_(size) (input, widthDim); THArgCheck(outputW == THTensor_(size) (gradOutput, widthDim), 3, "gradOutput width unexpected"); THArgCheck(outputH == THTensor_(size) (gradOutput, heightDim), 3, "gradOutput height unexpected"); /* get contiguous gradOutput */ gradOutput = THTensor_(newContiguous) (gradOutput); /* resize */ THTensor_(resizeAs) (gradInput, input); THTensor_(zero) (gradInput); /* backprop */ if (numInputDims == 3) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( THTensor_(data) (gradInput), THTensor_(data) (gradOutput), THIndexTensor_(data) (indices), numPlanes, inputW, inputH, outputW, outputH); } else { int64_t batch; #pragma omp parallel for private(batch) for (batch = 0; batch < numBatch; ++batch) { THNN_(SpatialFractionalMaxPooling_updateGradInput_frame) ( THTensor_(data) (gradInput) + batch * numPlanes * inputH * inputW, THTensor_(data) (gradOutput) + batch * numPlanes * outputH * outputW, THIndexTensor_(data) (indices) + batch * numPlanes * outputH * outputW, numPlanes, inputW, inputH, outputW, outputH); } } /* cleanup */ THTensor_(free) 
(gradOutput); } #endif
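The interval generator above is the core of fractional max pooling: for a single random sample u in [0,1) it places outputSize windows of width poolSize pseudo-randomly but in order, and pins the last window to inputSize - poolSize so every window stays in bounds. A standalone sketch of the same formula with made-up sizes (no TH types involved):

#include <stdio.h>

int main(void)
{
    const int inputSize = 10, outputSize = 4, poolSize = 3;
    const double u = 0.37; /* one random sample per plane and axis */
    const double alpha = (double)(inputSize - poolSize) / (double)(outputSize - 1);
    int i;
    for (i = 0; i < outputSize; ++i) {
        long start = (i == outputSize - 1)
            ? (long)(inputSize - poolSize)                 /* last window pinned */
            : (long)((i + u) * alpha) - (long)(u * alpha); /* cast = floor, values positive */
        printf("window %d: [%ld, %ld)\n", i, start, start + poolSize);
    }
    return 0;
}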
diagsm_x_csc_u_col.c
#include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC * A, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/opt.h" #include "alphasparse/kernel.h" #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSC * A, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for (ALPHA_INT c = 0; c < columns; ++c) { for (ALPHA_INT r = 0; r < A->rows; ++r) { alpha_mul(y[index2(c, r, ldy)], alpha, x[index2(c, r, ldx)]); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
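All three variants above reduce the unit-diagonal triangular solve to a scaled copy: with a unit diagonal each solution column is just alpha * x, so the CSC structure of A is never touched. A minimal plain-double sketch of the same loop nest, assuming index2(c, r, ld) expands to c * ld + r (column-major panels); diagsm_unit is a hypothetical stand-in name:

#include <stdio.h>

static void diagsm_unit(double alpha, const double *x, int rows, int columns,
                        int ldx, double *y, int ldy)
{
    /* columns are independent, exactly as in the kernel above */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int c = 0; c < columns; ++c)
        for (int r = 0; r < rows; ++r)
            y[(size_t)c * ldy + r] = alpha * x[(size_t)c * ldx + r];
}

int main(void)
{
    double x[6] = {1, 2, 3, 4, 5, 6}, y[6];
    diagsm_unit(2.0, x, 3, 2, 3, y, 3); /* 3 rows, 2 right-hand sides */
    for (int i = 0; i < 6; ++i)
        printf("%g ", y[i]);
    printf("\n");
    return 0;
}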
main.c
/*======================================*/ /*= Autor: Tiago Serique Valadares =*/ /*= GRR: 20195138 =*/ /*= Disciplina: Aprendizado de Maquina =*/ /*======================================*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "knn.h" #include "read_data.h" int main(int argc, char *argv[]){ char train_base_file_name[LINESIZE]; char test_base_file_name[LINESIZE]; int k = 0; int n_lines_test = 0; int n_features = 0; int n_lines_train = 0; int n_classes = 0; int **confusion_matrix = NULL; Data *train_data_array = NULL; Data *test_data_array = NULL; FILE* train_base_file = NULL; FILE* test_base_file = NULL; if ( argc < 4 ){ printf("Formato de entrada:\n"); printf("knn <base de treinamento> <base de teste> <valor de k>\n"); return EXIT_FAILURE; } strcpy(train_base_file_name, argv[1]); strcpy(test_base_file_name, argv[2]); k = atoi(argv[3]); // open the train base file train_base_file = fopen(train_base_file_name, "r"); if ( train_base_file == NULL ){ printf("Not able to open the train base file\n"); return EXIT_FAILURE; } train_data_array = readData(train_base_file, &n_lines_train, &n_features, &n_classes); fclose(train_base_file); // open the test base file test_base_file = fopen(test_base_file_name, "r"); if ( test_base_file == NULL ){ printf("Not able to open the test base file\n"); return EXIT_FAILURE; } test_data_array = readData(test_base_file, &n_lines_test, &n_features, &n_classes); fclose(test_base_file); confusion_matrix = (int **)malloc(sizeof(int *) * n_classes + n_classes * n_classes * sizeof(int)); confusion_matrix[0] = (int *)(confusion_matrix + n_classes); #pragma omp parallel for for (int i = 1; i < n_classes; i++) confusion_matrix[i] = confusion_matrix[0] + (i * n_classes); #pragma omp parallel for for (int i = 0; i < n_classes; i++) for (int j = 0; j < n_classes; j++) confusion_matrix[i][j] = 0; knn(confusion_matrix, train_data_array, test_data_array, k, n_lines_train, n_lines_test, n_features, n_classes); printConfusionMatrix(confusion_matrix, n_classes); calculateAccuracy(confusion_matrix, n_classes); free(train_data_array); free(test_data_array); free(confusion_matrix); return EXIT_SUCCESS; }
/* ====================================== */ /* = Autor: Tiago Serique Valadares = */ /* = GRR: 20195138 = */ /* = Disciplina: Aprendizado de Maquina = */ /* ====================================== */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "knn.h" #include "read_data.h" int main(int argc, char *argv[]) { char train_base_file_name[LINESIZE]; char test_base_file_name[LINESIZE]; int k = 0; int n_lines_test = 0; int n_features = 0; int n_lines_train = 0; int n_classes = 0; int **confusion_matrix = NULL; Data *train_data_array = NULL; Data *test_data_array = NULL; FILE *train_base_file = NULL; FILE *test_base_file = NULL; if (argc < 4) { printf("Formato de entrada:\n"); printf("knn <base de treinamento> <base de teste> <valor de k>\n"); return EXIT_FAILURE; } strcpy(train_base_file_name, argv[1]); strcpy(test_base_file_name, argv[2]); k = atoi(argv[3]); //open the train base file train_base_file = fopen(train_base_file_name, "r"); if (train_base_file == NULL) { printf("Not able to open the train base file\n"); return EXIT_FAILURE; } train_data_array = readData(train_base_file, &n_lines_train, &n_features, &n_classes); fclose(train_base_file); //open the test base file test_base_file = fopen(test_base_file_name, "r"); if (test_base_file == NULL) { printf("Not able to open the test base file\n"); return EXIT_FAILURE; } test_data_array = readData(test_base_file, &n_lines_test, &n_features, &n_classes); fclose(test_base_file); confusion_matrix = (int **)malloc(sizeof(int *) * n_classes + n_classes * n_classes * sizeof(int)); confusion_matrix[0] = (int *)(confusion_matrix + n_classes); for (int i = 1; i < n_classes; i++) confusion_matrix[i] = confusion_matrix[0] + (i * n_classes); for (int i = 0; i < n_classes; i++) for (int j = 0; j < n_classes; j++) confusion_matrix[i][j] = 0; knn(confusion_matrix, train_data_array, test_data_array, k, n_lines_train, n_lines_test, n_features, n_classes); printConfusionMatrix(confusion_matrix, n_classes); calculateAccuracy(confusion_matrix, n_classes); free(train_data_array); free(test_data_array); free(confusion_matrix); return EXIT_SUCCESS; }
/* ====================================== */ /* = Autor: Tiago Serique Valadares = */ /* = GRR: 20195138 = */ /* = Disciplina: Aprendizado de Maquina = */ /* ====================================== */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "knn.h" #include "read_data.h" int main(int argc, char *argv[]) { char train_base_file_name[LINESIZE]; char test_base_file_name[LINESIZE]; int k = 0; int n_lines_test = 0; int n_features = 0; int n_lines_train = 0; int n_classes = 0; int **confusion_matrix = NULL; Data *train_data_array = NULL; Data *test_data_array = NULL; FILE *train_base_file = NULL; FILE *test_base_file = NULL; if (argc < 4) { printf("Formato de entrada:\n"); printf("knn <base de treinamento> <base de teste> <valor de k>\n"); return EXIT_FAILURE; } strcpy(train_base_file_name, argv[1]); strcpy(test_base_file_name, argv[2]); k = atoi(argv[3]); //open the train base file train_base_file = fopen(train_base_file_name, "r"); if (train_base_file == NULL) { printf("Not able to open the train base file\n"); return EXIT_FAILURE; } train_data_array = readData(train_base_file, &n_lines_train, &n_features, &n_classes); fclose(train_base_file); //open the test base file test_base_file = fopen(test_base_file_name, "r"); if (test_base_file == NULL) { printf("Not able to open the test base file\n"); return EXIT_FAILURE; } test_data_array = readData(test_base_file, &n_lines_test, &n_features, &n_classes); fclose(test_base_file); confusion_matrix = (int **)malloc(sizeof(int *) * n_classes + n_classes * n_classes * sizeof(int)); confusion_matrix[0] = (int *)(confusion_matrix + n_classes); #pragma omp parallel for for (int i = 1; i < n_classes; i++) confusion_matrix[i] = confusion_matrix[0] + (i * n_classes); #pragma omp parallel for for (int i = 0; i < n_classes; i++) for (int j = 0; j < n_classes; j++) confusion_matrix[i][j] = 0; knn(confusion_matrix, train_data_array, test_data_array, k, n_lines_train, n_lines_test, n_features, n_classes); printConfusionMatrix(confusion_matrix, n_classes); calculateAccuracy(confusion_matrix, n_classes); free(train_data_array); free(test_data_array); free(confusion_matrix); return EXIT_SUCCESS; }
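The confusion matrix above uses a single-allocation 2D array: one malloc holds the n row pointers followed by the n*n payload, so the whole structure is released by the one free(confusion_matrix) at the end. A minimal sketch of that layout (alloc_square is a hypothetical helper, not part of the original program):

#include <stdio.h>
#include <stdlib.h>

static int **alloc_square(int n)
{
    /* n row pointers, then n*n ints, in one contiguous block */
    int **m = malloc((size_t)n * sizeof(int *) + (size_t)n * n * sizeof(int));
    if (m == NULL)
        return NULL;
    m[0] = (int *)(m + n); /* payload starts right after the pointer table */
    for (int i = 1; i < n; i++)
        m[i] = m[0] + (size_t)i * n;
    for (int i = 0; i < n * n; i++)
        m[0][i] = 0;
    return m;
}

int main(void)
{
    int **cm = alloc_square(3);
    cm[2][1] = 7;
    printf("cm[2][1] = %d\n", cm[2][1]);
    free(cm); /* one free releases pointers and data together */
    return 0;
}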
F-type_fermi_dirac.c
/* A. Odrzywolek, AOdrzywolek */ #include "../fermidirac.h" #include <stdlib.h> #include <string.h> #include <unistd.h> #include <math.h> #include <stdio.h> #include <float.h> /* Functions below are integrated with so-called DoubleExponential or Tanh-Sinh quadrature. * * Some references: * * Mori, Masatake (2005), "Discovery of the double exponential transformation and its developments", * Publications of the Research Institute for Mathematical Sciences 41 (4): 897–935, * doi:10.2977/prims/1145474600, * ISSN 0034-5318 * http://www.kurims.kyoto-u.ac.jp/~okamoto/paper/Publ_RIMS_DE/41-4-38.pdf, eq. (4.17) * * See also: http://en.wikipedia.org/wiki/Tanh-sinh_quadrature and references therein. * */ /* SECTION FOR RELATIVISTIC Fermi-Dirac integrals (F-function) */ double integrandF(const double t, const double k, const double eta, const double theta) { double x,dx,integrand,result,factor; /* if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using proper (unsafe?) coding modern CPU's do calculations internally in long double format anyway. NOTE: obsolete, see comment below where optimal coding for integrand is described. */ x = exp( t - exp(-t) ); /* Masatake Mori, eq. (4.17) */ //if( (eta>k) && (k>0) ) x = eta*exp(t-exp(-t)); else x = exp(t-exp(-t)); //dx = x*(1 + exp(-t) ); /* dx/dt */ dx = 1.0+exp(-t); /* in this case x is adsorbed in integrand, and x^k -> x^(k+1) */ if(x-eta<-log(DBL_EPSILON)) // if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0/(1.0+exp(x-eta) ); integrand = exp( (k+1.0)*(t - exp(-t)) ); //integrand = pow(x,k+1.0); integrand = integrand*sqrt(1.0+0.5*theta*x)*factor; } else { //factor = exp(eta-x) adsorbed into exp, to avoid 0*infinity mess integrand = exp((k+1.0)*(t - exp(-t)) + eta - x ); integrand = integrand*sqrt(1.0+ 0.5*theta*x); } /* NOTE: * * if we use: * * integrand = pow(x,k+1.0)*sqrt(1.0+ 0.5*theta*x)*factor; * * then: * * a) precision is lost, beacuse x is double, while exp((k+1.0)*(t - exp(-t)) ) * is internally handled as long double (96 bit) * b) if k<0 we lost advantage of postponed underflow ( k+1 << 1 in such a case ) * */ #if DEBUG printf("DEBUG300: factor = %.20Lf, x=%.20Lf, dx=%.20Lf, integrand=%.20Lf, return = %.20Lf \t test= %.20Lf \n",factor, x,dx,integrand, (integrand*dx),test); #endif result = integrand*dx; return result; } long double integrandF_long(const long double t, const long double k, const long double eta, const long double theta) { long double x,dx,integrand,result,factor; //const double lambda = M_E*100.5;//scaling factor /* if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using proper (unsafe?) coding modern CPU's do calculations internally in long double format anyway. NOTE: obsolete, see comment below where optimal coding for integrand is described. */ x = expl( t - expl(-t) ); /* Masatake Mori, eq. 
(4.17) */ //dx = x*(1 + exp(-t) ); /* dx/dt */ dx = 1.0L+exp(-t); /* in this case x is adsorbed in integrand, and x^k -> x^(k+1) */ if(x-eta<-logl(LDBL_EPSILON)) // if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0L/(1.0L+expl(x-eta) ); //integrand = expl( (kL+1.0L)*(tL - expl(-tL)) ); integrand = powl(x,k+1.0L); integrand = integrand*sqrtl(1.0L+0.5L*theta*x)*factor; } else { //factor = exp(eta-x) adsorbed into exp, to avoid 0*infinity mess integrand = expl((k+1.0L)*(t - expl(-t)) + eta - x ); integrand = integrand*sqrtl(1.0L+ 0.5L*theta*x); } result = integrand*dx; return result; } double Ffermi_estimate(double h, double last_result, double k, double eta, double theta) { int step,i; double sum_Left_old, sum_Right_old; double sum_Left_new, sum_Right_new; double old_result, new_result; #if KAHAN double c=0.0,t,y; // https://en.wikipedia.org/wiki/Kahan_summation_algorithm #endif if(last_result<0.0) /* Negative value means first iteration*/ { step=1; old_result = 2.0*h*integrandF(0.0, k, eta, theta); } else { step=2; old_result = last_result; } #if DEBUG printf("DEBUG2: old=%e,\tlast=%e\n",old_result,last_result); #endif /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i=1; /* possible vectorization, but loop step must be known at compile time! #pragma omp simd #pragma ivdep for(i=1;i<=16;i+=2) { sum_Right_new += integrandF(h*i, k, eta, theta); } */ do { sum_Right_old = sum_Right_new; #if KAHAN y = integrandF(h*i, k, eta, theta) - c; t = sum_Right_new + y; c = (t-sum_Right_new) - y; sum_Right_new = t; #else sum_Right_new = sum_Right_old + integrandF(h*i, k, eta, theta); //sum_Right_new = sum_Right_old + integrandF(h*i, k, eta, theta); #endif i = i + step; } while ( sum_Right_old<sum_Right_new ); //floating point fixed-point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; #if KAHAN c = 0.0; #endif i=-1; do { sum_Left_old = sum_Left_new; #if KAHAN y = integrandF(h*i, k, eta, theta) - c; t = sum_Left_new + y; c = (t-sum_Left_new) - y; sum_Left_new = t; #else sum_Left_new = sum_Left_old + integrandF(h*i, k, eta, theta); #endif i = i - step; } while (sum_Left_old<sum_Left_new); new_result = h*(sum_Left_new + sum_Right_new) + 0.5*old_result; return new_result; } long double Ffermi_estimate_long(long double h, long double last_result, long double k, long double eta, long double theta) { int step,i; long double sum_Left_old, sum_Right_old; long double sum_Left_new, sum_Right_new; long double old_result, new_result; if(last_result<0.0L) /* Negative value means first iteration*/ { step=1; old_result = 2.0L*h*integrandF_long(0.0L, k, eta, theta); } else { step=2; old_result = last_result; } /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i=1; do { sum_Right_old = sum_Right_new; sum_Right_new = sum_Right_old + integrandF_long(h*i, k, eta, theta); i = i + step; } while ( sum_Right_old<sum_Right_new ); //floating point fixed-point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; i=-1; do { sum_Left_old = sum_Left_new; sum_Left_new = sum_Left_old + integrandF_long(h*i, k, eta, theta); i = i - step; } while (sum_Left_old<sum_Left_new); new_result = h*(sum_Left_new + sum_Right_new) + 0.5L*old_result; return new_result; } double Ffermi_value(const double k, const double eta, const double theta, const double precision, const int recursion_limit) { double old=0.0, new=0.0, h=0.5; if(k<=-1.0) return nan("NaN"); /* not converging for 
k <= -1 */ #if DEBUG printf("DEBUG0: h=%lf,\tval=%e\n",h,new); #endif old = 0.0; new = Ffermi_estimate(h, -1.0, k, eta, theta); #if DEBUG printf("DEBUG1: h=%lf,\tval=%e\n",h,new); #endif while( fabs(old-new)>precision*fabs(new) && h>pow(2.0,-recursion_limit)) { old=new; h=0.5*h; new = Ffermi_estimate(h, old, k, eta, theta); #if DEBUG printf("DEBUG4: h=%lf,\tval=%e\n",h,new); #endif } return new; } long double Ffermi_dblexp_long(const long double k, const long double eta, const long double theta, const long double precision, const int recursion_limit) { long double old=0.0L, new=0.0L, h=0.5L; if(k<=-1.0L) return nan("NaN"); /* not converging for k <= -1 */ old = 0.0L; new = Ffermi_estimate_long(h, -1.0L, k, eta, theta); while( fabsl(old-new)>precision*fabsl(new) && h>powl(2.0L,-recursion_limit)) { old=new; h=0.5L*h; new = Ffermi_estimate_long(h, old, k, eta, theta); } return new; } /* TODO: error control not implemented ! */ double Ffermi_sommerfeld(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double leading_term, derivative,asymptotic_terms=0.0; int i,j; const double etaTBL[12] = {0.50000000000000000000000000000000, \ 0.69314718055994530941723212145818, \ 0.82246703342411321823620758332301, \ 0.90154267736969571404980362113359, \ 0.94703282949724591757650323447352, \ 0.97211977044690930593565514355347, \ 0.98555109129743510409843924448495, \ 0.99259381992283028267042571313339, \ 0.99623300185264789922728926008280, \ 0.99809429754160533076778303185260, \ 0.99903950759827156563922184569934, \ 0.99951714349806075414409417482869}; //leading_term = pow(eta,1.0+k)/(1.0+k)*hyp2f1(-0.5,1.0+k,2.0+k,-0.5*eta*theta); leading_term = pow(eta,1.0+k)/(1.0+k)*sommerfeld_leading_term(k,-0.5*eta*theta); if(SERIES_TERMS_MAX==0) return leading_term; if(SERIES_TERMS_MAX==1) return leading_term + M_PI*M_PI/6.0*(pow(eta,k)*theta/4.0/sqrt(1.0+theta*eta/2.0)+k*pow(eta,k-1.0)*sqrt(1.0+theta*eta/2.0)); for(i=1;i<=SERIES_TERMS_MAX;i++) { derivative = 0.0; for(j=0;j<=2*i-1;j++) derivative = derivative + binom(2*i-1,j)*tgamma(1.5)*tgamma(1.0+k)/tgamma(1.5-j)/tgamma(2.0+k-2.0*i+j) *pow(0.5*theta,j)*pow(1.0+0.5*theta*eta,0.5-j)*pow(eta,1.0-2.0*i+j+k); if(i>5) asymptotic_terms = asymptotic_terms + derivative*dirichlet_eta(2.0*i,DBL_EPSILON,64); else asymptotic_terms = asymptotic_terms + derivative*etaTBL[2*i]; } return leading_term + 2.0*asymptotic_terms; } double Ffermi_series_neg(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double sum_old=0.0, sum_new=0.0,x; int i=0; x=2.0/theta; do { i++; sum_old = sum_new; sum_new += ( i % 2 == 0 ) ? exp(i*eta)*U(k,i*x) : -exp(i*eta)*U(k,i*x); } while( ( (precision>0) ? fabs(sum_old-sum_new) >= precision*sum_new: sum_old!=sum_new ) && i<SERIES_TERMS_MAX ); return -sum_new*tgamma(1.0+k)*pow(x,1.0+k); } double Ffermi_series_sqrt_a(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum_old=0.0, sum_new=0.0; //for(i=0;i<SERIES_TERMS_MAX;i++) sum = sum + Ffermi_complete(k+i,eta)*pow(0.5*theta,i)*binom12[i]; i=0; do { sum_old = sum_new; sum_new += Ffermi_complete(k+i,eta)*pow(0.5*theta,i)*binom12[i]; i++; } while( ( (precision>0) ? 
fabs(sum_old-sum_new) >= precision*sum_new: sum_old!=sum_new ) && i<SERIES_TERMS_MAX ); //printf("\nDBG:\t%e\t%d\n",theta,i); return sum_new; } double Ffermi_series_sqrt_b(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum=0.0; for(i=0;( (i<SERIES_TERMS_MAX) && (k+0.5-i>-1.0) );i++) sum = sum + Ffermi_complete(k-i+0.5,eta)*pow(0.5*theta,0.5-i)*binom12[i]; //printf("\nDBG:\t%e\t%d\n",theta,i); return sum; } double Ffermi(const double k, const double eta, const double theta) { #if 0 if( fmax(1.0+k-log(DBL_EPSILON),eta+1.0+k-log(DBL_EPSILON))*theta<sqrt(DBL_EPSILON) ) { /* special case for tiny theta relative to 1 and eta */ printf("SPECIAL\t"); return Ffermi_series_sqrt(k, eta, theta); } #endif if( eta>56000.0) return Ffermi_sommerfeld(k, eta, theta, DBL_EPSILON, 32); else if( (eta<0.0) && (k>25.0) && (theta>=1.0) ) return Ffermi_series_neg(k, eta, theta, DBL_EPSILON, 32); else return Ffermi_value(k,eta,theta,PRECISION_GOAL, MAX_REFINE); } long double Ffermi_long(const long double k, const long double eta, const long double theta) { return Ffermi_dblexp_long(k,eta,theta,PRECISION_GOAL, MAX_REFINE); }
/* * A. Odrzywolek, AOdrzywolek */ #include "../fermidirac.h" #include <stdlib.h> #include <string.h> #include <unistd.h> #include <math.h> #include <stdio.h> #include <float.h> /* * Functions below are integrated with so-called DoubleExponential or * Tanh-Sinh quadrature. * * Some references: * * Mori, Masatake (2005), "Discovery of the double exponential transformation * and its developments", Publications of the Research Institute for * Mathematical Sciences 41 (4): 897–935, doi:10.2977/prims/1145474600, * ISSN 0034-5318 * http://www.kurims.kyoto-u.ac.jp/~okamoto/paper/Publ_RIMS_DE/41-4-38.pdf, * eq. (4.17) * * See also: http://en.wikipedia.org/wiki/Tanh-sinh_quadrature and references * therein. * */ /* * * SECTION FOR RELATIVISTIC Fermi-Dirac integrals (F-function) * * */ double integrandF(const double t, const double k, const double eta, const double theta) { double x, dx, integrand, result, factor; /* * if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than * LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using * proper (unsafe?) coding modern CPU's do calculations internally in * long double format anyway. NOTE: obsolete, see comment below where * optimal coding for integrand is described. * */ x = exp(t - exp(-t)); /* Masatake Mori, eq. (4.17) */ //if ((eta > k) && (k > 0)) x = eta * exp(t - exp(-t)); else x = exp(t - exp(-t)); //dx = x * (1 + exp(-t)); /* dx/dt */ dx = 1.0 + exp(-t); /* in this case x is adsorbed in integrand, * and x^k -> x^(k+1) */ if (x - eta < -log(DBL_EPSILON)) //if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0 / (1.0 + exp(x - eta)); integrand = exp((k + 1.0) * (t - exp(-t))); //integrand = pow(x, k + 1.0); integrand = integrand * sqrt(1.0 + 0.5 * theta * x) * factor; } else { //factor = exp(eta - x) adsorbed into exp, to avoid 0 * infinity mess integrand = exp((k + 1.0) * (t - exp(-t)) + eta - x); integrand = integrand * sqrt(1.0 + 0.5 * theta * x); } /* * NOTE: * * if we use: * * integrand = pow(x,k+1.0)*sqrt(1.0+ 0.5*theta*x)*factor; * * then: * * a) precision is lost, beacuse x is double, while exp((k+1.0)*(t - * exp(-t)) ) is internally handled as long double (96 bit) b) if k<0 we * lost advantage of postponed underflow ( k+1 << 1 in such a case ) * */ #if DEBUG printf("DEBUG300: factor = %.20Lf, x=%.20Lf, dx=%.20Lf, integrand=%.20Lf, return = %.20Lf \t test= %.20Lf \n", factor, x, dx, integrand, (integrand * dx), test); #endif result = integrand * dx; return result; } long double integrandF_long(const long double t, const long double k, const long double eta, const long double theta) { long double x, dx, integrand, result, factor; //const double lambda = M_E * 100.5; //scaling factor /* * if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than * LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using * proper (unsafe?) coding modern CPU's do calculations internally in * long double format anyway. NOTE: obsolete, see comment below where * optimal coding for integrand is described. * */ x = expl(t - expl(-t)); /* Masatake Mori, eq. 
(4.17) */ //dx = x * (1 + exp(-t)); /* dx/dt */ dx = 1.0L + exp(-t); /* in this case x is adsorbed in integrand, * and x^k -> x^(k+1) */ if (x - eta < -logl(LDBL_EPSILON)) //if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0L / (1.0L + expl(x - eta)); //integrand = expl((kL + 1.0L) * (tL - expl(-tL))); integrand = powl(x, k + 1.0L); integrand = integrand * sqrtl(1.0L + 0.5L * theta * x) * factor; } else { //factor = exp(eta - x) adsorbed into exp, to avoid 0 * infinity mess integrand = expl((k + 1.0L) * (t - expl(-t)) + eta - x); integrand = integrand * sqrtl(1.0L + 0.5L * theta * x); } result = integrand * dx; return result; } double Ffermi_estimate(double h, double last_result, double k, double eta, double theta) { int step, i; double sum_Left_old, sum_Right_old; double sum_Left_new, sum_Right_new; double old_result, new_result; #if KAHAN double c = 0.0, t, y; //https://en.wikipedia.org / wiki / Kahan_summation_algorithm #endif if (last_result < 0.0) /* Negative value means first iteration */ { step = 1; old_result = 2.0 * h * integrandF(0.0, k, eta, theta); } else { step = 2; old_result = last_result; } #if DEBUG printf("DEBUG2: old=%e,\tlast=%e\n", old_result, last_result); #endif /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i = 1; /* * possible vectorization, but loop step must be known at compile time! * #pragma ivdep for(i=1;i<=16;i+=2) { sum_Right_new += integrandF(h*i, * k, eta, theta); } */ do { sum_Right_old = sum_Right_new; #if KAHAN y = integrandF(h * i, k, eta, theta) - c; t = sum_Right_new + y; c = (t - sum_Right_new) - y; sum_Right_new = t; #else sum_Right_new = sum_Right_old + integrandF(h * i, k, eta, theta); //sum_Right_new = sum_Right_old + integrandF(h * i, k, eta, theta); #endif i = i + step; } while (sum_Right_old < sum_Right_new); //floating point fixed - point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; #if KAHAN c = 0.0; #endif i = -1; do { sum_Left_old = sum_Left_new; #if KAHAN y = integrandF(h * i, k, eta, theta) - c; t = sum_Left_new + y; c = (t - sum_Left_new) - y; sum_Left_new = t; #else sum_Left_new = sum_Left_old + integrandF(h * i, k, eta, theta); #endif i = i - step; } while (sum_Left_old < sum_Left_new); new_result = h * (sum_Left_new + sum_Right_new) + 0.5 * old_result; return new_result; } long double Ffermi_estimate_long(long double h, long double last_result, long double k, long double eta, long double theta) { int step, i; long double sum_Left_old, sum_Right_old; long double sum_Left_new, sum_Right_new; long double old_result, new_result; if (last_result < 0.0L) /* Negative value means first iteration */ { step = 1; old_result = 2.0L * h * integrandF_long(0.0L, k, eta, theta); } else { step = 2; old_result = last_result; } /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i = 1; do { sum_Right_old = sum_Right_new; sum_Right_new = sum_Right_old + integrandF_long(h * i, k, eta, theta); i = i + step; } while (sum_Right_old < sum_Right_new); //floating point fixed - point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; i = -1; do { sum_Left_old = sum_Left_new; sum_Left_new = sum_Left_old + integrandF_long(h * i, k, eta, theta); i = i - step; } while (sum_Left_old < sum_Left_new); new_result = h * (sum_Left_new + sum_Right_new) + 0.5L * old_result; return new_result; } double Ffermi_value(const double k, const double eta, const double theta, const double precision, 
const int recursion_limit) { double old = 0.0, new = 0.0, h = 0.5; if (k <= -1.0) return nan("NaN"); /* not converging for k <= -1 */ #if DEBUG printf("DEBUG0: h=%lf,\tval=%e\n", h, new); #endif old = 0.0; new = Ffermi_estimate(h, -1.0, k, eta, theta); #if DEBUG printf("DEBUG1: h=%lf,\tval=%e\n", h, new); #endif while (fabs(old - new) > precision * fabs(new) && h > pow(2.0, -recursion_limit)) { old = new; h = 0.5 * h; new = Ffermi_estimate(h, old, k, eta, theta); #if DEBUG printf("DEBUG4: h=%lf,\tval=%e\n", h, new); #endif } return new; } long double Ffermi_dblexp_long(const long double k, const long double eta, const long double theta, const long double precision, const int recursion_limit) { long double old = 0.0L, new = 0.0L, h = 0.5L; if (k <= -1.0L) return nan("NaN"); /* not converging for k <= -1 */ old = 0.0L; new = Ffermi_estimate_long(h, -1.0L, k, eta, theta); while (fabsl(old - new) > precision * fabsl(new) && h > powl(2.0L, -recursion_limit)) { old = new; h = 0.5L * h; new = Ffermi_estimate_long(h, old, k, eta, theta); } return new; } /* TODO: error control not implemented ! */ double Ffermi_sommerfeld(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double leading_term, derivative, asymptotic_terms = 0.0; int i, j; const double etaTBL[12] = {0.50000000000000000000000000000000, \ 0.69314718055994530941723212145818, \ 0.82246703342411321823620758332301, \ 0.90154267736969571404980362113359, \ 0.94703282949724591757650323447352, \ 0.97211977044690930593565514355347, \ 0.98555109129743510409843924448495, \ 0.99259381992283028267042571313339, \ 0.99623300185264789922728926008280, \ 0.99809429754160533076778303185260, \ 0.99903950759827156563922184569934, \ 0.99951714349806075414409417482869}; //leading_term = pow(eta, 1.0 + k) / (1.0 + k) * hyp2f1(-0.5, 1.0 + k, 2.0 + k, -0.5 * eta * theta); leading_term = pow(eta, 1.0 + k) / (1.0 + k) * sommerfeld_leading_term(k, -0.5 * eta * theta); if (SERIES_TERMS_MAX == 0) return leading_term; if (SERIES_TERMS_MAX == 1) return leading_term + M_PI * M_PI / 6.0 * (pow(eta, k) * theta / 4.0 / sqrt(1.0 + theta * eta / 2.0) + k * pow(eta, k - 1.0) * sqrt(1.0 + theta * eta / 2.0)); for (i = 1; i <= SERIES_TERMS_MAX; i++) { derivative = 0.0; for (j = 0; j <= 2 * i - 1; j++) derivative = derivative + binom(2 * i - 1, j) * tgamma(1.5) * tgamma(1.0 + k) / tgamma(1.5 - j) / tgamma(2.0 + k - 2.0 * i + j) * pow(0.5 * theta, j) * pow(1.0 + 0.5 * theta * eta, 0.5 - j) * pow(eta, 1.0 - 2.0 * i + j + k); if (i > 5) asymptotic_terms = asymptotic_terms + derivative * dirichlet_eta(2.0 * i, DBL_EPSILON, 64); else asymptotic_terms = asymptotic_terms + derivative * etaTBL[2 * i]; } return leading_term + 2.0 * asymptotic_terms; } double Ffermi_series_neg(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double sum_old = 0.0, sum_new = 0.0, x; int i = 0; x = 2.0 / theta; do { i++; sum_old = sum_new; sum_new += (i % 2 == 0) ? exp(i * eta) * U(k, i * x) : -exp(i * eta) * U(k, i * x); } while (((precision > 0) ? 
fabs(sum_old - sum_new) >= precision * sum_new : sum_old != sum_new) && i < SERIES_TERMS_MAX); return -sum_new * tgamma(1.0 + k) * pow(x, 1.0 + k); } double Ffermi_series_sqrt_a(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum_old = 0.0, sum_new = 0.0; //for (i = 0; i < SERIES_TERMS_MAX; i++) sum = sum + Ffermi_complete(k + i, eta) * pow(0.5 * theta, i) * binom12[i]; i = 0; do { sum_old = sum_new; sum_new += Ffermi_complete(k + i, eta) * pow(0.5 * theta, i) * binom12[i]; i++; } while (((precision > 0) ? fabs(sum_old - sum_new) >= precision * sum_new : sum_old != sum_new) && i < SERIES_TERMS_MAX); //printf("\nDBG:\t%e\t%d\n", theta, i); return sum_new; } double Ffermi_series_sqrt_b(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum = 0.0; for (i = 0; ((i < SERIES_TERMS_MAX) && (k + 0.5 - i > -1.0)); i++) sum = sum + Ffermi_complete(k - i + 0.5, eta) * pow(0.5 * theta, 0.5 - i) * binom12[i]; //printf("\nDBG:\t%e\t%d\n", theta, i); return sum; } double Ffermi(const double k, const double eta, const double theta) { #if 0 if (fmax(1.0 + k - log(DBL_EPSILON), eta + 1.0 + k - log(DBL_EPSILON)) * theta < sqrt(DBL_EPSILON)) { /* special case for tiny theta relative to 1 and eta */ printf("SPECIAL\t"); return Ffermi_series_sqrt(k, eta, theta); } #endif if (eta > 56000.0) return Ffermi_sommerfeld(k, eta, theta, DBL_EPSILON, 32); else if ((eta < 0.0) && (k > 25.0) && (theta >= 1.0)) return Ffermi_series_neg(k, eta, theta, DBL_EPSILON, 32); else return Ffermi_value(k, eta, theta, PRECISION_GOAL, MAX_REFINE); } long double Ffermi_long(const long double k, const long double eta, const long double theta) { return Ffermi_dblexp_long(k, eta, theta, PRECISION_GOAL, MAX_REFINE); }
/* * A. Odrzywolek, AOdrzywolek */ #include "../fermidirac.h" #include <stdlib.h> #include <string.h> #include <unistd.h> #include <math.h> #include <stdio.h> #include <float.h> /* * Functions below are integrated with so-called DoubleExponential or * Tanh-Sinh quadrature. * * Some references: * * Mori, Masatake (2005), "Discovery of the double exponential transformation * and its developments", Publications of the Research Institute for * Mathematical Sciences 41 (4): 897–935, doi:10.2977/prims/1145474600, * ISSN 0034-5318 * http://www.kurims.kyoto-u.ac.jp/~okamoto/paper/Publ_RIMS_DE/41-4-38.pdf, * eq. (4.17) * * See also: http://en.wikipedia.org/wiki/Tanh-sinh_quadrature and references * therein. * */ /* * * SECTION FOR RELATIVISTIC Fermi-Dirac integrals (F-function) * * */ double integrandF(const double t, const double k, const double eta, const double theta) { double x, dx, integrand, result, factor; /* * if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than * LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using * proper (unsafe?) coding modern CPU's do calculations internally in * long double format anyway. NOTE: obsolete, see comment below where * optimal coding for integrand is described. * */ x = exp(t - exp(-t)); /* Masatake Mori, eq. (4.17) */ //if ((eta > k) && (k > 0)) x = eta * exp(t - exp(-t)); else x = exp(t - exp(-t)); //dx = x * (1 + exp(-t)); /* dx/dt */ dx = 1.0 + exp(-t); /* in this case x is adsorbed in integrand, * and x^k -> x^(k+1) */ if (x - eta < -log(DBL_EPSILON)) //if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0 / (1.0 + exp(x - eta)); integrand = exp((k + 1.0) * (t - exp(-t))); //integrand = pow(x, k + 1.0); integrand = integrand * sqrt(1.0 + 0.5 * theta * x) * factor; } else { //factor = exp(eta - x) adsorbed into exp, to avoid 0 * infinity mess integrand = exp((k + 1.0) * (t - exp(-t)) + eta - x); integrand = integrand * sqrt(1.0 + 0.5 * theta * x); } /* * NOTE: * * if we use: * * integrand = pow(x,k+1.0)*sqrt(1.0+ 0.5*theta*x)*factor; * * then: * * a) precision is lost, beacuse x is double, while exp((k+1.0)*(t - * exp(-t)) ) is internally handled as long double (96 bit) b) if k<0 we * lost advantage of postponed underflow ( k+1 << 1 in such a case ) * */ #if DEBUG printf("DEBUG300: factor = %.20Lf, x=%.20Lf, dx=%.20Lf, integrand=%.20Lf, return = %.20Lf \t test= %.20Lf \n", factor, x, dx, integrand, (integrand * dx), test); #endif result = integrand * dx; return result; } long double integrandF_long(const long double t, const long double k, const long double eta, const long double theta) { long double x, dx, integrand, result, factor; //const double lambda = M_E * 100.5; //scaling factor /* * if(t>-6.5) * * this is min t=-9.3, for which exp(t-exp(-t)) is still smaller than * LDBL_MIN defined in <float.h>. For DBL_MIN it is t>-6.5, but using * proper (unsafe?) coding modern CPU's do calculations internally in * long double format anyway. NOTE: obsolete, see comment below where * optimal coding for integrand is described. * */ x = expl(t - expl(-t)); /* Masatake Mori, eq. 
(4.17) */ //dx = x * (1 + exp(-t)); /* dx/dt */ dx = 1.0L + exp(-t); /* in this case x is adsorbed in integrand, * and x^k -> x^(k+1) */ if (x - eta < -logl(LDBL_EPSILON)) //if using machine precison we are unable to add 1.0 to exp(), then approximation is optimal { factor = 1.0L / (1.0L + expl(x - eta)); //integrand = expl((kL + 1.0L) * (tL - expl(-tL))); integrand = powl(x, k + 1.0L); integrand = integrand * sqrtl(1.0L + 0.5L * theta * x) * factor; } else { //factor = exp(eta - x) adsorbed into exp, to avoid 0 * infinity mess integrand = expl((k + 1.0L) * (t - expl(-t)) + eta - x); integrand = integrand * sqrtl(1.0L + 0.5L * theta * x); } result = integrand * dx; return result; } double Ffermi_estimate(double h, double last_result, double k, double eta, double theta) { int step, i; double sum_Left_old, sum_Right_old; double sum_Left_new, sum_Right_new; double old_result, new_result; #if KAHAN double c = 0.0, t, y; //https://en.wikipedia.org / wiki / Kahan_summation_algorithm #endif if (last_result < 0.0) /* Negative value means first iteration */ { step = 1; old_result = 2.0 * h * integrandF(0.0, k, eta, theta); } else { step = 2; old_result = last_result; } #if DEBUG printf("DEBUG2: old=%e,\tlast=%e\n", old_result, last_result); #endif /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i = 1; /* * possible vectorization, but loop step must be known at compile time! * #pragma omp simd #pragma ivdep for(i=1;i<=16;i+=2) { sum_Right_new += * integrandF(h*i, k, eta, theta); } */ do { sum_Right_old = sum_Right_new; #if KAHAN y = integrandF(h * i, k, eta, theta) - c; t = sum_Right_new + y; c = (t - sum_Right_new) - y; sum_Right_new = t; #else sum_Right_new = sum_Right_old + integrandF(h * i, k, eta, theta); //sum_Right_new = sum_Right_old + integrandF(h * i, k, eta, theta); #endif i = i + step; } while (sum_Right_old < sum_Right_new); //floating point fixed - point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; #if KAHAN c = 0.0; #endif i = -1; do { sum_Left_old = sum_Left_new; #if KAHAN y = integrandF(h * i, k, eta, theta) - c; t = sum_Left_new + y; c = (t - sum_Left_new) - y; sum_Left_new = t; #else sum_Left_new = sum_Left_old + integrandF(h * i, k, eta, theta); #endif i = i - step; } while (sum_Left_old < sum_Left_new); new_result = h * (sum_Left_new + sum_Right_new) + 0.5 * old_result; return new_result; } long double Ffermi_estimate_long(long double h, long double last_result, long double k, long double eta, long double theta) { int step, i; long double sum_Left_old, sum_Right_old; long double sum_Left_new, sum_Right_new; long double old_result, new_result; if (last_result < 0.0L) /* Negative value means first iteration */ { step = 1; old_result = 2.0L * h * integrandF_long(0.0L, k, eta, theta); } else { step = 2; old_result = last_result; } /* integral for 0 < t < Infinity */ sum_Right_old = 0.0; sum_Right_new = 0.0; i = 1; do { sum_Right_old = sum_Right_new; sum_Right_new = sum_Right_old + integrandF_long(h * i, k, eta, theta); i = i + step; } while (sum_Right_old < sum_Right_new); //floating point fixed - point method /* integral for -Infinity < t <0 */ sum_Left_old = 0.0; sum_Left_new = 0.0; i = -1; do { sum_Left_old = sum_Left_new; sum_Left_new = sum_Left_old + integrandF_long(h * i, k, eta, theta); i = i - step; } while (sum_Left_old < sum_Left_new); new_result = h * (sum_Left_new + sum_Right_new) + 0.5L * old_result; return new_result; } double Ffermi_value(const double k, const double eta, const double theta, const 
double precision, const int recursion_limit) { double old = 0.0, new = 0.0, h = 0.5; if (k <= -1.0) return nan("NaN"); /* not converging for k <= -1 */ #if DEBUG printf("DEBUG0: h=%lf,\tval=%e\n", h, new); #endif old = 0.0; new = Ffermi_estimate(h, -1.0, k, eta, theta); #if DEBUG printf("DEBUG1: h=%lf,\tval=%e\n", h, new); #endif while (fabs(old - new) > precision * fabs(new) && h > pow(2.0, -recursion_limit)) { old = new; h = 0.5 * h; new = Ffermi_estimate(h, old, k, eta, theta); #if DEBUG printf("DEBUG4: h=%lf,\tval=%e\n", h, new); #endif } return new; } long double Ffermi_dblexp_long(const long double k, const long double eta, const long double theta, const long double precision, const int recursion_limit) { long double old = 0.0L, new = 0.0L, h = 0.5L; if (k <= -1.0L) return nan("NaN"); /* not converging for k <= -1 */ old = 0.0L; new = Ffermi_estimate_long(h, -1.0L, k, eta, theta); while (fabsl(old - new) > precision * fabsl(new) && h > powl(2.0L, -recursion_limit)) { old = new; h = 0.5L * h; new = Ffermi_estimate_long(h, old, k, eta, theta); } return new; } /* TODO: error control not implemented ! */ double Ffermi_sommerfeld(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double leading_term, derivative, asymptotic_terms = 0.0; int i, j; const double etaTBL[12] = {0.50000000000000000000000000000000, \ 0.69314718055994530941723212145818, \ 0.82246703342411321823620758332301, \ 0.90154267736969571404980362113359, \ 0.94703282949724591757650323447352, \ 0.97211977044690930593565514355347, \ 0.98555109129743510409843924448495, \ 0.99259381992283028267042571313339, \ 0.99623300185264789922728926008280, \ 0.99809429754160533076778303185260, \ 0.99903950759827156563922184569934, \ 0.99951714349806075414409417482869}; //leading_term = pow(eta, 1.0 + k) / (1.0 + k) * hyp2f1(-0.5, 1.0 + k, 2.0 + k, -0.5 * eta * theta); leading_term = pow(eta, 1.0 + k) / (1.0 + k) * sommerfeld_leading_term(k, -0.5 * eta * theta); if (SERIES_TERMS_MAX == 0) return leading_term; if (SERIES_TERMS_MAX == 1) return leading_term + M_PI * M_PI / 6.0 * (pow(eta, k) * theta / 4.0 / sqrt(1.0 + theta * eta / 2.0) + k * pow(eta, k - 1.0) * sqrt(1.0 + theta * eta / 2.0)); for (i = 1; i <= SERIES_TERMS_MAX; i++) { derivative = 0.0; for (j = 0; j <= 2 * i - 1; j++) derivative = derivative + binom(2 * i - 1, j) * tgamma(1.5) * tgamma(1.0 + k) / tgamma(1.5 - j) / tgamma(2.0 + k - 2.0 * i + j) * pow(0.5 * theta, j) * pow(1.0 + 0.5 * theta * eta, 0.5 - j) * pow(eta, 1.0 - 2.0 * i + j + k); if (i > 5) asymptotic_terms = asymptotic_terms + derivative * dirichlet_eta(2.0 * i, DBL_EPSILON, 64); else asymptotic_terms = asymptotic_terms + derivative * etaTBL[2 * i]; } return leading_term + 2.0 * asymptotic_terms; } double Ffermi_series_neg(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { double sum_old = 0.0, sum_new = 0.0, x; int i = 0; x = 2.0 / theta; do { i++; sum_old = sum_new; sum_new += (i % 2 == 0) ? exp(i * eta) * U(k, i * x) : -exp(i * eta) * U(k, i * x); } while (((precision > 0) ? 
fabs(sum_old - sum_new) >= precision * sum_new : sum_old != sum_new) && i < SERIES_TERMS_MAX); return -sum_new * tgamma(1.0 + k) * pow(x, 1.0 + k); } double Ffermi_series_sqrt_a(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum_old = 0.0, sum_new = 0.0; //for (i = 0; i < SERIES_TERMS_MAX; i++) sum = sum + Ffermi_complete(k + i, eta) * pow(0.5 * theta, i) * binom12[i]; i = 0; do { sum_old = sum_new; sum_new += Ffermi_complete(k + i, eta) * pow(0.5 * theta, i) * binom12[i]; i++; } while (((precision > 0) ? fabs(sum_old - sum_new) >= precision * sum_new : sum_old != sum_new) && i < SERIES_TERMS_MAX); //printf("\nDBG:\t%e\t%d\n", theta, i); return sum_new; } double Ffermi_series_sqrt_b(const double k, const double eta, const double theta, const double precision, const int SERIES_TERMS_MAX) { #include "factorial.h" int i; double sum = 0.0; for (i = 0; ((i < SERIES_TERMS_MAX) && (k + 0.5 - i > -1.0)); i++) sum = sum + Ffermi_complete(k - i + 0.5, eta) * pow(0.5 * theta, 0.5 - i) * binom12[i]; //printf("\nDBG:\t%e\t%d\n", theta, i); return sum; } double Ffermi(const double k, const double eta, const double theta) { #if 0 if (fmax(1.0 + k - log(DBL_EPSILON), eta + 1.0 + k - log(DBL_EPSILON)) * theta < sqrt(DBL_EPSILON)) { /* special case for tiny theta relative to 1 and eta */ printf("SPECIAL\t"); return Ffermi_series_sqrt(k, eta, theta); } #endif if (eta > 56000.0) return Ffermi_sommerfeld(k, eta, theta, DBL_EPSILON, 32); else if ((eta < 0.0) && (k > 25.0) && (theta >= 1.0)) return Ffermi_series_neg(k, eta, theta, DBL_EPSILON, 32); else return Ffermi_value(k, eta, theta, PRECISION_GOAL, MAX_REFINE); } long double Ffermi_long(const long double k, const long double eta, const long double theta) { return Ffermi_dblexp_long(k, eta, theta, PRECISION_GOAL, MAX_REFINE); }
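The KAHAN branches of Ffermi_estimate use compensated summation: the carry c holds the rounding error of each addition, so small tanh-sinh tail terms are not swallowed by the large partial sum. A standalone sketch of the same update sequence on a toy series:

#include <stdio.h>

int main(void)
{
    double naive = 0.0, sum = 0.0, c = 0.0;
    int i;
    for (i = 0; i < 10000000; i++) {
        double y, t;
        naive += 0.1;       /* plain accumulation: rounding error grows with i */
        y = 0.1 - c;        /* feed back the previously lost low bits */
        t = sum + y;        /* big + small: low bits of y are dropped here */
        c = (t - sum) - y;  /* recover exactly what was dropped */
        sum = t;
    }
    printf("naive = %.9f\nkahan = %.9f\n", naive, sum);
    return 0;
}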
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently every time //double seed = 0; //uncomment this and your program will behave the same every time it's run srand(seed); //declare storage for an ElGamal cryptosystem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); int status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = sizeof(p) / sizeof(n); padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cryptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: [ "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cryptographic system ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); int stop = 0; #pragma omp parallel for for (unsigned int i=0;i<p-1;i++) { if (stop == 1) continue; /* key already found: skip remaining candidates */ #pragma omp critical { if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); stop = 1; } } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently every time //double seed = 0; //uncomment this and your program will behave the same every time it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); int status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = sizeof(p) / sizeof(n); padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cryptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: [ "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cryptographic system ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); int stop = 0; for (unsigned int i=0;i<p-1;i++) { if (stop == 1) continue; /* key already found: skip remaining candidates */ if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); stop = 1; } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently every time //double seed = 0; //uncomment this and your program will behave the same every time it's run srand(seed); //declare storage for an ElGamal cryptosystem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); int status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = sizeof(p) / sizeof(n); padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cryptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: [ "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cryptographic system ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); int stop = 0; #pragma omp parallel for for (unsigned int i=0;i<p-1;i++) { if (stop == 1) continue; /* key already found: skip remaining candidates */ #pragma omp critical { if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); stop = 1; } } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
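The key search above funnels every modExp call through the critical section, which serializes the threads. A sketch of a leaner pattern under the same assumptions (modExp(g, e, p) is the program's own helper from functions.h; find_key is a hypothetical wrapper): candidates are tested outside the critical section, and only a winning thread enters it to publish the key.

static unsigned int find_key(unsigned int p, unsigned int g, unsigned int h)
{
    unsigned int key = 0; /* 0 means "not found yet" */
    unsigned int i;
    #pragma omp parallel for schedule(static)
    for (i = 0; i < p - 1; i++) {
        unsigned int seen;
        #pragma omp atomic read
        seen = key;
        if (seen != 0)
            continue;            /* another thread already won */
        if (modExp(g, i + 1, p) == h) {
            #pragma omp critical
            {
                if (key == 0)
                    key = i + 1; /* publish exactly once */
            }
        }
    }
    return key;
}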
omp_parallel_private.c
<ompts:test> <ompts:testdescription>Test which checks the omp parallel private directive.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp parallel private</ompts:directive> <ompts:dependences>omp for omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" //static int sum1 = 789; int <ompts:testcode:functionname>omp_parallel_private</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int sum, num_threads,sum1; </ompts:orphan:vars> int known_sum; sum = 0; <ompts:crosscheck> sum1=0; </ompts:crosscheck> num_threads = 0; <ompts:orphan> printf("before parallel sum1(%p)=%d \n",&sum1, sum1); #pragma omp parallel <ompts:check>private(sum1)</ompts:check> { <ompts:check> sum1 = 7; </ompts:check> printf("before loop sum1(%p)=%d for thread %d\n",&sum1, sum1, omp_get_thread_num()); int i; #pragma omp for for (i = 1; i < 1000; i++) { sum1 = sum1 + i; } /*end of for*/ printf("after loop sum1(%p)=%d for thread %d\n",&sum1, sum1, omp_get_thread_num()); #pragma omp critical { sum = sum + sum1; num_threads++; } /*end of critical*/ } /* end of parallel*/ </ompts:orphan> known_sum = (999 * 1000) / 2 + 7 * num_threads; return (known_sum == sum); } </ompts:testcode> </ompts:test>
<ompts:test> <ompts:testdescription>Test which checks the omp parallel private directive.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp parallel private</ompts:directive> <ompts:dependences>omp for omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" //static int sum1 = 789; int <ompts:testcode:functionname>omp_parallel_private</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int sum, num_threads, sum1; </ompts:orphan:vars> int known_sum; sum = 0; <ompts:crosscheck> sum1 = 0; </ompts:crosscheck> num_threads = 0; <ompts:orphan> printf("before parallel sum1(%p)=%d \n", &sum1, sum1); <ompts:check> sum1 = 7; </ompts:check> printf("before loop sum1(%p)=%d for thread %d\n", &sum1, sum1, omp_get_thread_num()); int i; for (i = 1; i < 1000; i++) { sum1 = sum1 + i; } /* end of for */ printf("after loop sum1(%p)=%d for thread %d\n", &sum1, sum1, omp_get_thread_num()); sum = sum + sum1; num_threads++; /* end of critical */ /* end of parallel */ </ompts:orphan> known_sum = (999 * 1000) / 2 + 7 * num_threads; return (known_sum == sum); } </ompts:testcode> </ompts:test>
<ompts:test> <ompts:testdescription>Test which checks the omp parallel private directive.</ompts:testdescription> <ompts:ompversion>3.0</ompts:ompversion> <ompts:directive>omp parallel private</ompts:directive> <ompts:dependences>omp for omp critical</ompts:dependences> <ompts:testcode> #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" //static int sum1 = 789; int <ompts:testcode:functionname>omp_parallel_private</ompts:testcode:functionname>(FILE * logFile) { <ompts:orphan:vars> int sum, num_threads, sum1; </ompts:orphan:vars> int known_sum; sum = 0; <ompts:crosscheck> sum1 = 0; </ompts:crosscheck> num_threads = 0; <ompts:orphan> printf("before parallel sum1(%p)=%d \n", &sum1, sum1); #pragma omp parallel <ompts:check>private(sum1)</ompts:check> { <ompts:check> sum1 = 7; </ompts:check> printf("before loop sum1(%p)=%d for thread %d\n", &sum1, sum1, omp_get_thread_num()); int i; #pragma omp for for (i = 1; i < 1000; i++) { sum1 = sum1 + i; } /* end of for */ printf("after loop sum1(%p)=%d for thread %d\n", &sum1, sum1, omp_get_thread_num()); #pragma omp critical { sum = sum + sum1; num_threads++; } /* end of critical */ } /* end of parallel */ </ompts:orphan> known_sum = (999 * 1000) / 2 + 7 * num_threads; return (known_sum == sum); } </ompts:testcode> </ompts:test>
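Stripped of the test-suite markup, the property being tested fits in a few lines: private(sum1) gives each thread its own copy whose initial value is undefined, so it must be set inside the region, and the expected total 999*1000/2 + 7*num_threads only comes out right when that initialization really happens once per thread. A self-contained sketch:

#include <stdio.h>

int main(void)
{
    int sum = 0, num_threads = 0, sum1 = 0;
    #pragma omp parallel private(sum1)
    {
        sum1 = 7; /* each private copy starts undefined, so initialize it */
        int i;
        #pragma omp for
        for (i = 1; i < 1000; i++)
            sum1 = sum1 + i;
        #pragma omp critical
        {
            sum += sum1;
            num_threads++;
        }
    }
    printf("sum = %d, expected = %d\n", sum, (999 * 1000) / 2 + 7 * num_threads);
    return 0;
}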
stencil.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "malloc2D.h" #include "timer.h" #define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp) int main(int argc, char *argv[]) { #pragma omp parallel #pragma omp master printf("Running with %d thread(s)\n",omp_get_num_threads()); /* struct timespec tstart_init, tstart_flush, tstart_stencil, tstart_total; double init_time, flush_time, stencil_time, total_time; int imax=2002, jmax = 2002; double** xtmp; double** x = malloc2D(jmax, imax); double** xnew = malloc2D(jmax, imax); int *flush = (int *)malloc(jmax*imax*sizeof(int)*4); // cpu_timer_start(&tstart_total); // cpu_timer_start(&tstart_init); #pragma omp parallel for for (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ xnew[j][i] = 0.0; x[j][i] = 5.0; } } #pragma omp parallel for for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){ for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } } #ifdef XXX // init_time += cpu_timer_stop(tstart_init); for (int iter = 0; iter < 10000; iter++){ // cpu_timer_start(&tstart_flush); #pragma omp parallel for for (int l = 1; l < jmax*imax*4; l++){ flush[l] = 1.0; } // flush_time += cpu_timer_stop(tstart_flush); // cpu_timer_start(&tstart_stencil); #pragma omp parallel for for (int j = 1; j < jmax-1; j++){ for (int i = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0; } } // stencil_time += cpu_timer_stop(tstart_stencil); SWAP_PTR(xnew, x, xtmp); if (iter%1000 == 0) printf("Iter %d\n",iter); } // total_time += cpu_timer_stop(tstart_total); // printf("Timing is init %f flush %f stencil %f total %f\n", // init_time,flush_time,stencil_time,total_time); #endif */ }
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "malloc2D.h" #include "timer.h" #define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp) int main(int argc, char *argv[]) { printf("Running with %d thread(s)\n", omp_get_num_threads()); /* * struct timespec tstart_init, tstart_flush, tstart_stencil, * tstart_total; double init_time, flush_time, stencil_time, total_time; * int imax=2002, jmax = 2002; * * double** xtmp; double** x = malloc2D(jmax, imax); double** xnew = * malloc2D(jmax, imax); int *flush = (int * *)malloc(jmax*imax*sizeof(int)*4); * * // cpu_timer_start(&tstart_total); // cpu_timer_start(&tstart_init); for * (int j = 0; j < jmax; j++){ for (int i = 0; i < imax; i++){ xnew[j][i] * = 0.0; x[j][i] = 5.0; } } for (int j = jmax/2 - 5; j < jmax/2 + 5; * j++){ for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } * } #ifdef XXX // init_time += cpu_timer_stop(tstart_init); * * for (int iter = 0; iter < 10000; iter++){ // * cpu_timer_start(&tstart_flush); for (int l = 1; l < jmax*imax*4; l++){ * flush[l] = 1.0; } // flush_time += cpu_timer_stop(tstart_flush); // * cpu_timer_start(&tstart_stencil); for (int j = 1; j < jmax-1; j++){ * for (int i = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + * x[j][i+1] + x[j-1][i] + x[j+1][i] )/5.0; } } // stencil_time += * cpu_timer_stop(tstart_stencil); * * SWAP_PTR(xnew, x, xtmp); if (iter%1000 == 0) printf("Iter %d\n",iter); } * // total_time += cpu_timer_stop(tstart_total); * * // printf("Timing is init %f flush %f stencil %f total %f\n", // * init_time,flush_time,stencil_time,total_time); #endif */ }
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "malloc2D.h" #include "timer.h" #define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp) int main(int argc, char *argv[]) { #pragma omp parallel #pragma omp master printf("Running with %d thread(s)\n", omp_get_num_threads()); /* * struct timespec tstart_init, tstart_flush, tstart_stencil, * tstart_total; double init_time, flush_time, stencil_time, total_time; * int imax=2002, jmax = 2002; * * double** xtmp; double** x = malloc2D(jmax, imax); double** xnew = * malloc2D(jmax, imax); int *flush = (int * *)malloc(jmax*imax*sizeof(int)*4); * * // cpu_timer_start(&tstart_total); // cpu_timer_start(&tstart_init); * #pragma omp parallel for for (int j = 0; j < jmax; j++){ for (int i = * 0; i < imax; i++){ xnew[j][i] = 0.0; x[j][i] = 5.0; } } * * #pragma omp parallel for for (int j = jmax/2 - 5; j < jmax/2 + 5; j++){ * for (int i = imax/2 - 5; i < imax/2 -1; i++){ x[j][i] = 400.0; } } * #ifdef XXX // init_time += cpu_timer_stop(tstart_init); * * for (int iter = 0; iter < 10000; iter++){ // * cpu_timer_start(&tstart_flush); #pragma omp parallel for for (int l = * 1; l < jmax*imax*4; l++){ flush[l] = 1.0; } // flush_time += * cpu_timer_stop(tstart_flush); // cpu_timer_start(&tstart_stencil); * #pragma omp parallel for for (int j = 1; j < jmax-1; j++){ for (int i * = 1; i < imax-1; i++){ xnew[j][i] = ( x[j][i] + x[j][i-1] + x[j][i+1] * + x[j-1][i] + x[j+1][i] )/5.0; } } // stencil_time += * cpu_timer_stop(tstart_stencil); * * SWAP_PTR(xnew, x, xtmp); if (iter%1000 == 0) printf("Iter %d\n",iter); } * // total_time += cpu_timer_stop(tstart_total); * * // printf("Timing is init %f flush %f stencil %f total %f\n", // * init_time,flush_time,stencil_time,total_time); #endif */ }
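Nearly all of stencil.c is commented out in every variant above. For reference, a compilable sketch of the five-point Jacobi sweep the comments describe, on a flat array instead of the malloc2D helper (the grid size, iteration count, and symmetric warm block here are illustrative choices, not the original's):

#include <stdio.h>
#include <stdlib.h>

#define SWAP_PTR(xnew,xold,xtmp) (xtmp=xnew, xnew=xold, xold=xtmp)

int main(void)
{
    int imax = 202, jmax = 202;
    double *x    = calloc((size_t)jmax*imax, sizeof(double));
    double *xnew = calloc((size_t)jmax*imax, sizeof(double));
    double *xtmp;
#define IDX(j,i) ((j)*imax + (i))

    /* cold field everywhere, warm block in the middle */
#pragma omp parallel for
    for (int j = 0; j < jmax; j++)
        for (int i = 0; i < imax; i++)
            x[IDX(j,i)] = 5.0;
    for (int j = jmax/2 - 5; j < jmax/2 + 5; j++)
        for (int i = imax/2 - 5; i < imax/2 + 5; i++)
            x[IDX(j,i)] = 400.0;

    for (int iter = 0; iter < 1000; iter++) {
        /* each interior point becomes the average of itself and 4 neighbors */
#pragma omp parallel for
        for (int j = 1; j < jmax-1; j++)
            for (int i = 1; i < imax-1; i++)
                xnew[IDX(j,i)] = (x[IDX(j,i)] + x[IDX(j,i-1)] + x[IDX(j,i+1)]
                                + x[IDX(j-1,i)] + x[IDX(j+1,i)]) / 5.0;
        SWAP_PTR(xnew, x, xtmp); /* double buffering: reads and writes never alias */
    }
    printf("center value after smoothing: %f\n", x[IDX(jmax/2, imax/2)]);
    free(x); free(xnew);
    return 0;
}

The pointer swap is the load-bearing detail: updating in place would mix old and new neighbor values and make the parallel loop order-dependent.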
GB_unop__isfinite_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isfinite_bool_fp64) // op(A') function: GB (_unop_tran__isfinite_bool_fp64) // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isfinite (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isfinite (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isfinite (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isfinite_bool_fp64) ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isfinite_bool_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isfinite_bool_fp64) // op(A') function: GB (_unop_tran__isfinite_bool_fp64) // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isfinite (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isfinite (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isfinite (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isfinite_bool_fp64) ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isfinite_bool_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__isfinite_bool_fp64) // op(A') function: GB (_unop_tran__isfinite_bool_fp64) // C type: bool // A type: double // cast: double cij = (aij) // unaryop: cij = isfinite (aij) #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = isfinite (x) ; // casting #define GB_CAST(z, aij) \ double z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (aij) ; \ Cx [pC] = isfinite (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__isfinite_bool_fp64) ( bool *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; double z = (aij) ; Cx [p] = isfinite (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__isfinite_bool_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
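Stripped of the GB_* macro plumbing, the kernel above is a single pattern: an independent elementwise apply over anz entries, with an optional bitmap Ab marking which entries are present. A distilled sketch under that reading (the function name and signature are illustrative, not the GraphBLAS API):

#include <stdbool.h>
#include <stdint.h>
#include <math.h>

/* Cx [p] = isfinite (Ax [p]) for every present entry.  Ab == NULL means all
   anz entries are present; otherwise Ab is the bitmap of existing entries. */
void apply_isfinite(bool *Cx, const double *Ax, const int8_t *Ab,
                    int64_t anz, int nthreads)
{
    int64_t p;
    if (Ab == NULL)
    {
#pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++)
            Cx[p] = isfinite(Ax[p]);
    }
    else
    {
        /* bitmap case: skip holes; the bitmap itself is copied separately */
#pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0; p < anz; p++)
            if (Ab[p]) Cx[p] = isfinite(Ax[p]);
    }
}

Every iteration writes one slot and reads one slot, so schedule(static) with an even split is all the parallelism the kernel needs.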
GB_binop__pow_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int8 // A.*B function (eWiseMult): GB_AemultB__pow_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int8 // C+=b function (dense accum): GB_Cdense_accumb__pow_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int8 // C=scalar+B GB_bind1st__pow_int8 // C=scalar+B' GB_bind1st_tran__pow_int8 // C=A+scalar GB_bind2nd__pow_int8 // C=A'+scalar GB_bind2nd_tran__pow_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_pow_int8 (aij, bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_int8 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT8 || GxB_NO_POW_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_pow_int8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = GB_pow_int8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int8 // A.*B function (eWiseMult): GB_AemultB__pow_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int8 // C+=b function (dense accum): GB_Cdense_accumb__pow_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int8 // C=scalar+B GB_bind1st__pow_int8 // C=scalar+B' GB_bind1st_tran__pow_int8 // C=A+scalar GB_bind2nd__pow_int8 // C=A'+scalar GB_bind2nd_tran__pow_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_pow_int8 (aij, bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_int8 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT8 || GxB_NO_POW_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_pow_int8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = GB_pow_int8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__pow_int8 // A.*B function (eWiseMult): GB_AemultB__pow_int8 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__pow_int8 // C+=b function (dense accum): GB_Cdense_accumb__pow_int8 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__pow_int8 // C=scalar+B GB_bind1st__pow_int8 // C=scalar+B' GB_bind1st_tran__pow_int8 // C=A+scalar GB_bind2nd__pow_int8 // C=A'+scalar GB_bind2nd_tran__pow_int8 // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_pow_int8 (aij, bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_pow_int8 (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT8 || GxB_NO_POW_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__pow_int8 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__pow_int8 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *GB_RESTRICT Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__pow_int8 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__pow_int8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t bij = Bx [p] ; Cx [p] = GB_pow_int8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__pow_int8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int8_t aij = Ax [p] ; Cx [p] = GB_pow_int8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (x, aij) ; \ } GrB_Info GB_bind1st_tran__pow_int8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = Ax [pA] ; \ Cx [pC] = GB_pow_int8 (aij, y) ; \ } GrB_Info GB_bind2nd_tran__pow_int8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
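The bind1st/bind2nd kernels above curry one operand of the binary operator to a scalar, which reduces them to the same elementwise map as the unary case. A sketch of the bind2nd form for z = x^y over int8, with a stand-in repeated-squaring power routine, since GB_pow_int8 is internal to GraphBLAS and has its own casting and edge-case semantics (the names and the non-negative-exponent assumption here are illustrative):

#include <stdint.h>

/* stand-in integer power by repeated squaring; assumes y >= 0 and wraps on
   overflow -- the real GB_pow_int8 defines its own semantics */
static inline int8_t pow_int8(int8_t x, int8_t y)
{
    int8_t z = 1;
    while (y > 0) {
        if (y & 1) z = (int8_t)(z * x);
        x = (int8_t)(x * x);
        y >>= 1;
    }
    return z;
}

/* Cx [p] = pow (Ax [p], y): the scalar y is bound as the second operand */
void bind2nd_pow_int8(int8_t *Cx, const int8_t *Ax, int8_t y,
                      int64_t anz, int nthreads)
{
    int64_t p;
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        int8_t aij = Ax[p];
        Cx[p] = pow_int8(aij, y);
    }
}

bind1st is the mirror image, Cx[p] = pow_int8(x, Bx[p]); the transpose variants differ only in routing the result through the transpose template instead of writing Cx[p] directly.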
functions.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "omp.h" #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality check, check directly using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup 
successful.\n"); printf("p = %u. \n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ unsigned int length = strlen(string); unsigned int rem = length%charsPerInt; unsigned char pad = ' '; //str_append(*string, pad)*rem; for (int i = rem; i>0; i--) { string[strlen(string)] = ' '+'\0'; } } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { /* Q1.3 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ unsigned int x = 0; unsigned int charsPerInt = Nchars/Nints; #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { unsigned int num = 0; for (unsigned int j = 0; j < charsPerInt; j++) { unsigned char c = *string+j+x; unsigned int charNum = (unsigned int) c; if (charsPerInt - j == 1) { num = num+charNum; } else { num = (num+charNum)*256; } //x += charsPerInt; //*Z[i] = num; } x += charsPerInt; Z[i] = num; } } void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints; i++) { unsigned int num = Z[i]; unsigned int secChar = num%256; if ((num-secChar) != 0) { unsigned int firstChar = (num -secChar)/256; unsigned char c1 = (unsigned char) firstChar; string[strlen(string)+1] = c1; } unsigned char c2 = (unsigned char) secChar; string[strlen(string)+1] = c2; } }
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "omp.h" #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality check, check directly using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup 
successful.\n"); printf("p = %u. \n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.1 Parallelize this function with OpenMP */ for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.1 Parallelize this function with OpenMP */ for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ unsigned int length = strlen(string); unsigned int rem = length%charsPerInt; unsigned char pad = ' '; //str_append(*string, pad)*rem; for (int i = rem; i>0; i--) { string[strlen(string)] = ' '+'\0'; } } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { /* Q1.3 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ unsigned int x = 0; unsigned int charsPerInt = Nchars/Nints; for (unsigned int i = 0; i < Nints; i++) { unsigned int num = 0; for (unsigned int j = 0; j < charsPerInt; j++) { unsigned char c = *string+j+x; unsigned int charNum = (unsigned int) c; if (charsPerInt - j == 1) { num = num+charNum; } else { num = (num+charNum)*256; } //x += charsPerInt; //*Z[i] = num; } x += charsPerInt; Z[i] = num; } } void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { /* Q1.4 Complete this function */ /* Q2.2 Parallelize this function with OpenMP */ for (unsigned int i=0; i<Nints; i++) { unsigned int num = Z[i]; unsigned int secChar = num%256; if ((num-secChar) != 0) { unsigned int firstChar = (num -secChar)/256; unsigned char c1 = (unsigned char) firstChar; string[strlen(string)+1] = c1; } unsigned char c2 = (unsigned char) secChar; string[strlen(string)+1] = c2; } }
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "omp.h"
#include "functions.h"

//compute a*b mod p safely
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int za = a;
  unsigned int ab = 0;
  while (b > 0) {
    if (b%2 == 1) ab = (ab + za) % p;
    za = (2 * za) % p; //assumes p < 2^31, so 2*za cannot overflow unsigned int
    b /= 2;
  }
  return ab;
}

//compute a^b mod p safely
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int z = a;
  unsigned int aExpb = 1;
  while (b > 0) {
    if (b%2 == 1) aExpb = modprod(aExpb, z, p);
    z = modprod(z, z, p);
    b /= 2;
  }
  return aExpb;
}

//returns either 0 or 1 randomly
unsigned int randomBit() {
  return rand()%2;
}

//returns a random integer which is between 2^{n-1} and 2^{n}
unsigned int randXbitInt(unsigned int n) {
  unsigned int r = 1;
  for (unsigned int i=0; i<n-1; i++) {
    r = r*2 + randomBit();
  }
  return r;
}

//tests for primality and returns 1 if N is probably prime and 0 if N is composite
unsigned int isProbablyPrime(unsigned int N) {
  if (N%2==0) return 0; //not interested in even numbers (including 2)

  unsigned int NsmallPrimes = 168;
  unsigned int smallPrimeList[168] =
    {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
     53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
     127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
     199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
     283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
     383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
     467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
     577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
     661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
     769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
     877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
     983, 991, 997};

  //before using a probabilistic primality check, check directly using the small primes list
  for (unsigned int n=1;n<NsmallPrimes;n++) {
    if (N==smallPrimeList[n]) return 1; //true
    if (N%smallPrimeList[n]==0) return 0; //false
  }

  //if we're testing a large number, switch to the Miller-Rabin primality test
  unsigned int r = 0;
  unsigned int d = N-1;
  while (d%2 == 0) {
    d /= 2;
    r += 1;
  }

  for (unsigned int n=0;n<NsmallPrimes;n++) {
    unsigned int k = smallPrimeList[n];
    unsigned int x = modExp(k,d,N);

    if ((x==1) || (x==N-1)) continue;

    //perform the r-1 squarings (the original bound i<r-1 stopped one squaring
    //short, which can wrongly report a prime as composite)
    for (unsigned int i=1;i<r;i++) {
      x = modprod(x,x,N);
      if (x == 1) return 0; //false
      if (x == N-1) break;
    }
    // see whether we left the loop because x==N-1
    if (x == N-1) continue;

    return 0; //false
  }
  return 1; //true
}

//Finds a generator of Z_p using the assumption that p=2*q+1
unsigned int findGenerator(unsigned int p) {
  unsigned int g;
  unsigned int q = (p-1)/2;
  do {
    //make a random number 1<= g < p
    g = randXbitInt(32)%p; //could also have passed n to findGenerator
  } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1));
  return g;
}

void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
                  unsigned int *h, unsigned int *x) {

  /* Use isProbablyPrime and randXbitInt to find a new random n-bit prime
     number which satisfies p=2*q+1 where q is also prime */
  unsigned int q;
  do {
    *p = randXbitInt(n);
    q = (*p-1)/2;
  } while (!isProbablyPrime(*p) || !isProbablyPrime(q));

  /* Use the fact that p=2*q+1 to quickly find a generator */
  *g = findGenerator(*p);

  //pick a secret key, x
  *x = randXbitInt(n)%(*p);

  //compute h
  *h = modExp(*g,*x,*p);

  printf("ElGamal Setup successful.\n");
  printf("p = %u. \n", *p);
  printf("g = %u is a generator of Z_%u \n", *g, *p);
  printf("Secret key: x = %u \n", *x);
  printf("h = g^x = %u\n", *h);
  printf("\n");
}

void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int g, unsigned int h) {

  /* Q2.1 Parallelize this function with OpenMP */
  //note: rand() behind randXbitInt is not guaranteed to be thread-safe; a more
  //careful version would draw y from per-thread state (e.g. rand_r)
  #pragma omp parallel for
  for (unsigned int i=0; i<Nints; i++) {
    //pick y in Z_p randomly
    unsigned int y;
    do {
      y = randXbitInt(32)%p;
    } while (y==0); //don't allow y=0

    //compute a = g^y
    a[i] = modExp(g,y,p);

    //compute s = h^y
    unsigned int s = modExp(h,y,p);

    //encrypt m by multiplying with s
    m[i] = modprod(m[i],s,p);
  }
}

void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int x) {

  /* Q2.1 Parallelize this function with OpenMP */
  #pragma omp parallel for
  for (unsigned int i=0; i<Nints; i++) {
    //compute s = a^x
    unsigned int s = modExp(a[i],x,p);

    //compute s^{-1} = s^{p-2}
    unsigned int invS = modExp(s,p-2,p);

    //decrypt message by multiplying by invS
    m[i] = modprod(m[i],invS,p);
  }
}

//Pad the end of string so its length is divisible by charsPerInt
// Assume there is enough allocated storage for the padded string
void padString(unsigned char* string, unsigned int charsPerInt) {
  /* Q1.2 Complete this function */
  unsigned int length = strlen((char*) string);
  unsigned int rem = length%charsPerInt;
  if (rem == 0) return; //already a multiple of charsPerInt

  //append charsPerInt-rem spaces (the original loop appended rem characters
  //and overwrote the terminator), then re-terminate the string
  unsigned int padLen = charsPerInt - rem;
  for (unsigned int i = 0; i < padLen; i++) {
    string[length+i] = ' ';
  }
  string[length+padLen] = '\0';
}

void convertStringToZ(unsigned char *string, unsigned int Nchars,
                      unsigned int *Z, unsigned int Nints) {
  /* Q1.3 Complete this function */
  /* Q2.2 Parallelize this function with OpenMP */
  unsigned int charsPerInt = Nchars/Nints;

  #pragma omp parallel for
  for (unsigned int i = 0; i < Nints; i++) {
    //pack charsPerInt consecutive characters into one integer, big-endian;
    //indexing from i*charsPerInt (instead of advancing a shared offset, which
    //races under OpenMP) keeps the iterations independent
    unsigned int num = 0;
    for (unsigned int j = 0; j < charsPerInt; j++) {
      num = num*256 + (unsigned int) string[i*charsPerInt + j];
    }
    Z[i] = num;
  }
}

void convertZToString(unsigned int *Z, unsigned int Nints,
                      unsigned char *string, unsigned int Nchars) {
  /* Q1.4 Complete this function */
  /* Q2.2 Parallelize this function with OpenMP */
  unsigned int charsPerInt = Nchars/Nints;

  #pragma omp parallel for
  for (unsigned int i=0; i<Nints; i++) {
    //unpack the bytes of Z[i] into their fixed positions in the string;
    //writing by index (the original strlen-based appends race under OpenMP
    //and clobber the terminator) keeps the iterations independent
    unsigned int num = Z[i];
    for (unsigned int j = 0; j < charsPerInt; j++) {
      string[i*charsPerInt + (charsPerInt-1-j)] = (unsigned char) (num%256);
      num /= 256;
    }
  }
  string[Nchars] = '\0'; //assumes the caller allocated at least Nchars+1 bytes
}
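For reference, here is a minimal round-trip driver for the routines above. It is a sketch, not part of the assignment file: the main function, the 31-bit key size, and the toy plaintext blocks are illustrative choices, and it assumes this file is compiled together with the code above.

#include <stdio.h>

//prototypes for the functions defined above
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
                  unsigned int *h, unsigned int *x);
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int g, unsigned int h);
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
                    unsigned int p, unsigned int x);

int main(void) {
  unsigned int p, g, h, x;
  setupElGamal(31, &p, &g, &h, &x); //a 31-bit prime keeps 2*za in modprod below 2^32

  unsigned int m[4] = {101, 202, 303, 404}; //toy plaintext blocks, all < p
  unsigned int a[4];

  ElGamalEncrypt(m, a, 4, p, g, h); //m now holds the ciphertexts
  ElGamalDecrypt(m, a, 4, p, x);    //m is restored to the plaintexts

  for (int i = 0; i < 4; i++) printf("m[%d] = %u\n", i, m[i]);
  return 0;
}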
GB_transpose_bucket.c
//------------------------------------------------------------------------------ // GB_transpose_bucket: transpose and optionally typecast and/or apply operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = A' or op(A'). Optionally typecasts from A->type to the new type ctype, // and/or optionally applies a unary operator. // If an operator z=op(x) is provided, the type of z must be the same as the // type of C. The type of A must be compatible with the type of x (A is // typecasted into the type of x). These conditions must be checked in the // caller. // This function is agnostic for the CSR/CSC format of C and A. C_is_csc is // defined by the caller and assigned to C->is_csc, but otherwise unused. // A->is_csc is ignored. // The input can be hypersparse or non-hypersparse. The output C is always // non-hypersparse, and never shallow. On input, C is a static header. // If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is // O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if // most rows and columns of A are non-empty, but can be very costly if A or A' // is hypersparse. In particular, if A is a non-hypersparse column vector with // m >> e, the time and memory is O(m), which can be huge. Thus, for // hypersparse matrices, or for very sparse matrices, the qsort method should // be used instead (see GB_transpose). // This method is parallel, but not highly scalable. At most O(e/m) threads // are used. #include "GB_transpose.h" #define GB_FREE_WORKSPACE \ { \ if (Workspaces != NULL && Workspaces_size != NULL) \ { \ for (int tid = 0 ; tid < nworkspaces ; tid++) \ { \ GB_FREE_WORK (&(Workspaces [tid]), Workspaces_size [tid]) ; \ } \ } \ GB_WERK_POP (A_slice, int64_t) ; \ GB_WERK_POP (Workspaces_size, size_t) ; \ GB_WERK_POP (Workspaces, int64_t *) ; \ } #define GB_FREE_ALL \ { \ GB_phbix_free (C) ; \ GB_FREE_WORKSPACE ; \ } GrB_Info GB_transpose_bucket // bucket transpose; typecast and apply op ( GrB_Matrix C, // output matrix (static header) const GB_iso_code C_code_iso, // iso code for C const GrB_Type ctype, // type of output matrix C const bool C_is_csc, // format of output matrix C const GrB_Matrix A, // input matrix // no operator is applied if op is NULL const GB_Operator op, // unary/idxunop/binop to apply const GrB_Scalar scalar, // scalar to bind to binary operator bool binop_bind1st, // if true, binop(x,A) else binop(A,y) const int nworkspaces, // # of workspaces to use const int nthreads, // # of threads to use GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (C != NULL) ; ASSERT (C->static_header) ; ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ; ASSERT_MATRIX_OK (A, "A input for transpose_bucket", GB0) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; // if op is NULL, then no operator is applied // This method is only used when A is sparse or hypersparse. // The full and bitmap cases are handled in GB_transpose. 
ASSERT (!GB_IS_FULL (A)) ; ASSERT (!GB_IS_BITMAP (A)) ; ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ; GB_WERK_DECLARE (A_slice, int64_t) ; // size nthreads+1 GB_WERK_DECLARE (Workspaces, int64_t *) ; // size nworkspaces GB_WERK_DECLARE (Workspaces_size, size_t) ; // size nworkspaces //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- int64_t anz = GB_nnz (A) ; int64_t vlen = A->vlen ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- // # of threads to use in the O(vlen) loops below GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nth = GB_nthreads (vlen, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate C: always sparse //-------------------------------------------------------------------------- // The bucket transpose only works when C is sparse. // A can be sparse or hypersparse. // C->p is allocated but not initialized. GrB_Info info ; // set C->iso = C_iso OK bool C_iso = (C_code_iso != GB_NON_ISO) ; GB_OK (GB_new_bix (&C, true, // sparse, static header ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc, GxB_SPARSE, true, A->hyper_switch, vlen, anz, true, C_iso, Context)) ; int64_t *restrict Cp = C->p ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_WERK_PUSH (Workspaces, nworkspaces, int64_t *) ; GB_WERK_PUSH (Workspaces_size, nworkspaces, size_t) ; if (Workspaces == NULL || Workspaces_size == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } bool ok = true ; for (int tid = 0 ; tid < nworkspaces ; tid++) { Workspaces [tid] = GB_MALLOC_WORK (vlen + 1, int64_t, &Workspaces_size [tid]) ; ok = ok && (Workspaces [tid] != NULL) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //========================================================================== // phase1: symbolic analysis //========================================================================== // slice the A matrix, perfectly balanced for one task per thread GB_WERK_PUSH (A_slice, nthreads + 1, int64_t) ; if (A_slice == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (A_slice, A->p, A->nvec, nthreads, true) ; // sum up the row counts and find C->p if (nthreads == 1) { //---------------------------------------------------------------------- // sequential method: A is not sliced //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen for a single // thread. The resulting C matrix is not jumbled. // compute the row counts of A. 
No need to scan the A->p pointers ASSERT (nworkspaces == 1) ; int64_t *restrict workspace = Workspaces [0] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; const int64_t *restrict Ai = A->i ; for (int64_t p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; workspace [i]++ ; } // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), 1, NULL) ; memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t)) ; } else if (nworkspaces == 1) { //---------------------------------------------------------------------- // atomic method: A is sliced but workspace is shared //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen, shared by all // threads. Scales well, but requires atomics. If the # of rows is // very small and the average row degree is high, this can be very slow // because of contention on the atomic workspace. Otherwise, it is // typically faster than the non-atomic method. The resulting C matrix // is jumbled. // compute the row counts of A. No need to scan the A->p pointers int64_t *restrict workspace = Workspaces [0] ; GB_memset (workspace, 0, (vlen + 1) * sizeof (int64_t), nth) ; const int64_t *restrict Ai = A->i ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; // update workspace [i]++ atomically: GB_ATOMIC_UPDATE workspace [i]++ ; } C->jumbled = true ; // atomic transpose leaves C jumbled // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), nth, Context) ; GB_memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t), nth) ; } else { //---------------------------------------------------------------------- // non-atomic method //---------------------------------------------------------------------- // compute the row counts of A for each slice, one per thread. This // method is parallel, but not highly scalable. Each thread requires // int64 workspace of size vlen, but no atomics are required. The // resulting C matrix is not jumbled, so this can save work if C needs // to be unjumbled later. 
ASSERT (nworkspaces == nthreads) ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { // get the row counts for this slice, of size A->vlen int64_t *restrict workspace = Workspaces [tid] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++) { // iterate over the entries in A(:,j) int64_t j = GBH (Ah, k) ; int64_t pA_start = Ap [k] ; int64_t pA_end = Ap [k+1] ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // count one more entry in C(i,:) for this slice int64_t i = Ai [pA] ; workspace [i]++ ; } } } // cumulative sum of the workspaces across the slices int64_t i ; #pragma omp parallel for num_threads(nth) schedule(static) for (i = 0 ; i < vlen ; i++) { int64_t s = 0 ; for (int tid = 0 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; int64_t c = workspace [i] ; workspace [i] = s ; s += c ; } Cp [i] = s ; } Cp [vlen] = 0 ; // compute the vector pointers for C GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nth, Context) ; // add Cp back to all Workspaces #pragma omp parallel for num_threads(nth) schedule(static) for (i = 0 ; i < vlen ; i++) { int64_t s = Cp [i] ; int64_t *restrict workspace = Workspaces [0] ; workspace [i] = s ; for (int tid = 1 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; workspace [i] += s ; } } } C->magic = GB_MAGIC ; //========================================================================== // phase2: transpose A into C //========================================================================== // transpose both the pattern and the values if (op == NULL) { // do not apply an operator; optional typecast to C->type GB_transpose_ix (C, A, Workspaces, A_slice, nworkspaces, nthreads) ; } else { // apply an operator, C has type op->ztype GB_transpose_op (C, C_code_iso, op, scalar, binop_bind1st, A, Workspaces, A_slice, nworkspaces, nthreads) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ; ASSERT (C->h == NULL) ; return (GrB_SUCCESS) ; }
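As a side note, the three phase-1 strategies above all reduce to the same histogram-plus-scan idea. Here is a small stand-alone sketch of the sequential method, with the GB_* helpers replaced by plain C and an illustrative 3-by-3 matrix; none of these names or values come from the library.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t vlen = 3;                    // # of rows of A = # of vectors of C = A'
  int64_t Ai[] = {0, 2, 1, 2};         // row indices of the 4 entries of a CSC matrix A
  int64_t anz = 4;
  int64_t workspace[4] = {0, 0, 0, 0}; // size vlen+1, as in the code above

  // count the entries destined for each vector of C (the bucket counts)
  for (int64_t p = 0; p < anz; p++) workspace[Ai[p]]++;

  // an exclusive cumulative sum (the job of GB_cumsum above) turns the
  // counts into the vector pointers Cp of the transpose
  int64_t Cp[4], s = 0;
  for (int64_t i = 0; i <= vlen; i++) {
    int64_t c = workspace[i];
    Cp[i] = s;
    s += c;
  }

  // prints Cp = {0, 1, 2, 4}: rows 0 and 1 hold 1 entry each, row 2 holds 2
  for (int64_t i = 0; i <= vlen; i++)
    printf("Cp[%lld] = %lld\n", (long long) i, (long long) Cp[i]);
  return 0;
}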
//------------------------------------------------------------------------------ // GB_transpose_bucket: transpose and optionally typecast and/or apply operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = A' or op(A'). Optionally typecasts from A->type to the new type ctype, // and/or optionally applies a unary operator. // If an operator z=op(x) is provided, the type of z must be the same as the // type of C. The type of A must be compatible with the type of x (A is // typecasted into the type of x). These conditions must be checked in the // caller. // This function is agnostic for the CSR/CSC format of C and A. C_is_csc is // defined by the caller and assigned to C->is_csc, but otherwise unused. // A->is_csc is ignored. // The input can be hypersparse or non-hypersparse. The output C is always // non-hypersparse, and never shallow. On input, C is a static header. // If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is // O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if // most rows and columns of A are non-empty, but can be very costly if A or A' // is hypersparse. In particular, if A is a non-hypersparse column vector with // m >> e, the time and memory is O(m), which can be huge. Thus, for // hypersparse matrices, or for very sparse matrices, the qsort method should // be used instead (see GB_transpose). // This method is parallel, but not highly scalable. At most O(e/m) threads // are used. #include "GB_transpose.h" #define GB_FREE_WORKSPACE \ { \ if (Workspaces != NULL && Workspaces_size != NULL) \ { \ for (int tid = 0 ; tid < nworkspaces ; tid++) \ { \ GB_FREE_WORK (&(Workspaces [tid]), Workspaces_size [tid]) ; \ } \ } \ GB_WERK_POP (A_slice, int64_t) ; \ GB_WERK_POP (Workspaces_size, size_t) ; \ GB_WERK_POP (Workspaces, int64_t *) ; \ } #define GB_FREE_ALL \ { \ GB_phbix_free (C) ; \ GB_FREE_WORKSPACE ; \ } GrB_Info GB_transpose_bucket // bucket transpose; typecast and apply op ( GrB_Matrix C, // output matrix (static header) const GB_iso_code C_code_iso, // iso code for C const GrB_Type ctype, // type of output matrix C const bool C_is_csc, // format of output matrix C const GrB_Matrix A, // input matrix // no operator is applied if op is NULL const GB_Operator op, // unary/idxunop/binop to apply const GrB_Scalar scalar, // scalar to bind to binary operator bool binop_bind1st, // if true, binop(x,A) else binop(A,y) const int nworkspaces, // # of workspaces to use const int nthreads, // # of threads to use GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (C != NULL) ; ASSERT (C->static_header) ; ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ; ASSERT_MATRIX_OK (A, "A input for transpose_bucket", GB0) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; // if op is NULL, then no operator is applied // This method is only used when A is sparse or hypersparse. // The full and bitmap cases are handled in GB_transpose. 
ASSERT (!GB_IS_FULL (A)) ; ASSERT (!GB_IS_BITMAP (A)) ; ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ; GB_WERK_DECLARE (A_slice, int64_t) ; // size nthreads+1 GB_WERK_DECLARE (Workspaces, int64_t *) ; // size nworkspaces GB_WERK_DECLARE (Workspaces_size, size_t) ; // size nworkspaces //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- int64_t anz = GB_nnz (A) ; int64_t vlen = A->vlen ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- // # of threads to use in the O(vlen) loops below GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nth = GB_nthreads (vlen, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate C: always sparse //-------------------------------------------------------------------------- // The bucket transpose only works when C is sparse. // A can be sparse or hypersparse. // C->p is allocated but not initialized. GrB_Info info ; // set C->iso = C_iso OK bool C_iso = (C_code_iso != GB_NON_ISO) ; GB_OK (GB_new_bix (&C, true, // sparse, static header ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc, GxB_SPARSE, true, A->hyper_switch, vlen, anz, true, C_iso, Context)) ; int64_t *restrict Cp = C->p ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_WERK_PUSH (Workspaces, nworkspaces, int64_t *) ; GB_WERK_PUSH (Workspaces_size, nworkspaces, size_t) ; if (Workspaces == NULL || Workspaces_size == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } bool ok = true ; for (int tid = 0 ; tid < nworkspaces ; tid++) { Workspaces [tid] = GB_MALLOC_WORK (vlen + 1, int64_t, &Workspaces_size [tid]) ; ok = ok && (Workspaces [tid] != NULL) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //========================================================================== // phase1: symbolic analysis //========================================================================== // slice the A matrix, perfectly balanced for one task per thread GB_WERK_PUSH (A_slice, nthreads + 1, int64_t) ; if (A_slice == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (A_slice, A->p, A->nvec, nthreads, true) ; // sum up the row counts and find C->p if (nthreads == 1) { //---------------------------------------------------------------------- // sequential method: A is not sliced //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen for a single // thread. The resulting C matrix is not jumbled. // compute the row counts of A. 
No need to scan the A->p pointers ASSERT (nworkspaces == 1) ; int64_t *restrict workspace = Workspaces [0] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; const int64_t *restrict Ai = A->i ; for (int64_t p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; workspace [i]++ ; } // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), 1, NULL) ; memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t)) ; } else if (nworkspaces == 1) { //---------------------------------------------------------------------- // atomic method: A is sliced but workspace is shared //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen, shared by all // threads. Scales well, but requires atomics. If the # of rows is // very small and the average row degree is high, this can be very slow // because of contention on the atomic workspace. Otherwise, it is // typically faster than the non-atomic method. The resulting C matrix // is jumbled. // compute the row counts of A. No need to scan the A->p pointers int64_t *restrict workspace = Workspaces [0] ; GB_memset (workspace, 0, (vlen + 1) * sizeof (int64_t), nth) ; const int64_t *restrict Ai = A->i ; int64_t p ; for (p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; // update workspace [i]++ atomically: GB_ATOMIC_UPDATE workspace [i]++ ; } C->jumbled = true ; // atomic transpose leaves C jumbled // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), nth, Context) ; GB_memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t), nth) ; } else { //---------------------------------------------------------------------- // non-atomic method //---------------------------------------------------------------------- // compute the row counts of A for each slice, one per thread. This // method is parallel, but not highly scalable. Each thread requires // int64 workspace of size vlen, but no atomics are required. The // resulting C matrix is not jumbled, so this can save work if C needs // to be unjumbled later. 
ASSERT (nworkspaces == nthreads) ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; int tid ; for (tid = 0 ; tid < nthreads ; tid++) { // get the row counts for this slice, of size A->vlen int64_t *restrict workspace = Workspaces [tid] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++) { // iterate over the entries in A(:,j) int64_t j = GBH (Ah, k) ; int64_t pA_start = Ap [k] ; int64_t pA_end = Ap [k+1] ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // count one more entry in C(i,:) for this slice int64_t i = Ai [pA] ; workspace [i]++ ; } } } // cumulative sum of the workspaces across the slices int64_t i ; for (i = 0 ; i < vlen ; i++) { int64_t s = 0 ; for (int tid = 0 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; int64_t c = workspace [i] ; workspace [i] = s ; s += c ; } Cp [i] = s ; } Cp [vlen] = 0 ; // compute the vector pointers for C GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nth, Context) ; // add Cp back to all Workspaces for (i = 0 ; i < vlen ; i++) { int64_t s = Cp [i] ; int64_t *restrict workspace = Workspaces [0] ; workspace [i] = s ; for (int tid = 1 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; workspace [i] += s ; } } } C->magic = GB_MAGIC ; //========================================================================== // phase2: transpose A into C //========================================================================== // transpose both the pattern and the values if (op == NULL) { // do not apply an operator; optional typecast to C->type GB_transpose_ix (C, A, Workspaces, A_slice, nworkspaces, nthreads) ; } else { // apply an operator, C has type op->ztype GB_transpose_op (C, C_code_iso, op, scalar, binop_bind1st, A, Workspaces, A_slice, nworkspaces, nthreads) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ; ASSERT (C->h == NULL) ; return (GrB_SUCCESS) ; }
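The cross-thread scan in the non-atomic branch above is the subtle part: at every row index, the per-thread counts are converted into per-thread starting offsets. Here is a stand-alone sketch with two hard-coded thread histograms; the sizes and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define NTHREADS 2

int main(void) {
  int64_t vlen = 3;
  // per-thread row counts, as if each thread had processed its slice of A
  int64_t Workspaces[NTHREADS][3] = { {1, 0, 2}, {0, 2, 1} };
  int64_t Cp[4];

  // scan across threads at each row i: Workspaces[tid][i] becomes the offset
  // at which thread tid may start writing entries of row i, while Cp[i]
  // collects the total count of row i
  for (int64_t i = 0; i < vlen; i++) {
    int64_t s = 0;
    for (int tid = 0; tid < NTHREADS; tid++) {
      int64_t c = Workspaces[tid][i];
      Workspaces[tid][i] = s;
      s += c;
    }
    Cp[i] = s;
  }
  Cp[vlen] = 0;

  // an exclusive cumulative sum (GB_cumsum in the code above) finishes Cp:
  // the totals {1, 2, 3} become the pointers {0, 1, 3, 6}
  int64_t s = 0;
  for (int64_t i = 0; i <= vlen; i++) {
    int64_t c = Cp[i];
    Cp[i] = s;
    s += c;
  }

  for (int64_t i = 0; i <= vlen; i++)
    printf("Cp[%lld] = %lld\n", (long long) i, (long long) Cp[i]);
  return 0;
}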
//------------------------------------------------------------------------------ // GB_transpose_bucket: transpose and optionally typecast and/or apply operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // C = A' or op(A'). Optionally typecasts from A->type to the new type ctype, // and/or optionally applies a unary operator. // If an operator z=op(x) is provided, the type of z must be the same as the // type of C. The type of A must be compatible with the type of x (A is // typecasted into the type of x). These conditions must be checked in the // caller. // This function is agnostic for the CSR/CSC format of C and A. C_is_csc is // defined by the caller and assigned to C->is_csc, but otherwise unused. // A->is_csc is ignored. // The input can be hypersparse or non-hypersparse. The output C is always // non-hypersparse, and never shallow. On input, C is a static header. // If A is m-by-n in CSC format, with e nonzeros, the time and memory taken is // O(m+n+e) if A is non-hypersparse, or O(m+e) if hypersparse. This is fine if // most rows and columns of A are non-empty, but can be very costly if A or A' // is hypersparse. In particular, if A is a non-hypersparse column vector with // m >> e, the time and memory is O(m), which can be huge. Thus, for // hypersparse matrices, or for very sparse matrices, the qsort method should // be used instead (see GB_transpose). // This method is parallel, but not highly scalable. At most O(e/m) threads // are used. #include "GB_transpose.h" #define GB_FREE_WORKSPACE \ { \ if (Workspaces != NULL && Workspaces_size != NULL) \ { \ for (int tid = 0 ; tid < nworkspaces ; tid++) \ { \ GB_FREE_WORK (&(Workspaces [tid]), Workspaces_size [tid]) ; \ } \ } \ GB_WERK_POP (A_slice, int64_t) ; \ GB_WERK_POP (Workspaces_size, size_t) ; \ GB_WERK_POP (Workspaces, int64_t *) ; \ } #define GB_FREE_ALL \ { \ GB_phbix_free (C) ; \ GB_FREE_WORKSPACE ; \ } GrB_Info GB_transpose_bucket // bucket transpose; typecast and apply op ( GrB_Matrix C, // output matrix (static header) const GB_iso_code C_code_iso, // iso code for C const GrB_Type ctype, // type of output matrix C const bool C_is_csc, // format of output matrix C const GrB_Matrix A, // input matrix // no operator is applied if op is NULL const GB_Operator op, // unary/idxunop/binop to apply const GrB_Scalar scalar, // scalar to bind to binary operator bool binop_bind1st, // if true, binop(x,A) else binop(A,y) const int nworkspaces, // # of workspaces to use const int nthreads, // # of threads to use GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (C != NULL) ; ASSERT (C->static_header) ; ASSERT_TYPE_OK (ctype, "ctype for transpose", GB0) ; ASSERT_MATRIX_OK (A, "A input for transpose_bucket", GB0) ; ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ; ASSERT (GB_JUMBLED_OK (A)) ; // if op is NULL, then no operator is applied // This method is only used when A is sparse or hypersparse. // The full and bitmap cases are handled in GB_transpose. 
ASSERT (!GB_IS_FULL (A)) ; ASSERT (!GB_IS_BITMAP (A)) ; ASSERT (GB_IS_SPARSE (A) || GB_IS_HYPERSPARSE (A)) ; GB_WERK_DECLARE (A_slice, int64_t) ; // size nthreads+1 GB_WERK_DECLARE (Workspaces, int64_t *) ; // size nworkspaces GB_WERK_DECLARE (Workspaces_size, size_t) ; // size nworkspaces //-------------------------------------------------------------------------- // get A //-------------------------------------------------------------------------- int64_t anz = GB_nnz (A) ; int64_t vlen = A->vlen ; //-------------------------------------------------------------------------- // determine the number of threads to use //-------------------------------------------------------------------------- // # of threads to use in the O(vlen) loops below GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nth = GB_nthreads (vlen, chunk, nthreads_max) ; //-------------------------------------------------------------------------- // allocate C: always sparse //-------------------------------------------------------------------------- // The bucket transpose only works when C is sparse. // A can be sparse or hypersparse. // C->p is allocated but not initialized. GrB_Info info ; // set C->iso = C_iso OK bool C_iso = (C_code_iso != GB_NON_ISO) ; GB_OK (GB_new_bix (&C, true, // sparse, static header ctype, A->vdim, vlen, GB_Ap_malloc, C_is_csc, GxB_SPARSE, true, A->hyper_switch, vlen, anz, true, C_iso, Context)) ; int64_t *restrict Cp = C->p ; //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- GB_WERK_PUSH (Workspaces, nworkspaces, int64_t *) ; GB_WERK_PUSH (Workspaces_size, nworkspaces, size_t) ; if (Workspaces == NULL || Workspaces_size == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } bool ok = true ; for (int tid = 0 ; tid < nworkspaces ; tid++) { Workspaces [tid] = GB_MALLOC_WORK (vlen + 1, int64_t, &Workspaces_size [tid]) ; ok = ok && (Workspaces [tid] != NULL) ; } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //========================================================================== // phase1: symbolic analysis //========================================================================== // slice the A matrix, perfectly balanced for one task per thread GB_WERK_PUSH (A_slice, nthreads + 1, int64_t) ; if (A_slice == NULL) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } GB_pslice (A_slice, A->p, A->nvec, nthreads, true) ; // sum up the row counts and find C->p if (nthreads == 1) { //---------------------------------------------------------------------- // sequential method: A is not sliced //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen for a single // thread. The resulting C matrix is not jumbled. // compute the row counts of A. 
No need to scan the A->p pointers ASSERT (nworkspaces == 1) ; int64_t *restrict workspace = Workspaces [0] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; const int64_t *restrict Ai = A->i ; for (int64_t p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; workspace [i]++ ; } // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), 1, NULL) ; memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t)) ; } else if (nworkspaces == 1) { //---------------------------------------------------------------------- // atomic method: A is sliced but workspace is shared //---------------------------------------------------------------------- // Only requires a single int64 workspace of size vlen, shared by all // threads. Scales well, but requires atomics. If the # of rows is // very small and the average row degree is high, this can be very slow // because of contention on the atomic workspace. Otherwise, it is // typically faster than the non-atomic method. The resulting C matrix // is jumbled. // compute the row counts of A. No need to scan the A->p pointers int64_t *restrict workspace = Workspaces [0] ; GB_memset (workspace, 0, (vlen + 1) * sizeof (int64_t), nth) ; const int64_t *restrict Ai = A->i ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t i = Ai [p] ; // update workspace [i]++ atomically: GB_ATOMIC_UPDATE workspace [i]++ ; } C->jumbled = true ; // atomic transpose leaves C jumbled // cumulative sum of the workspace, and copy back into C->p GB_cumsum (workspace, vlen, &(C->nvec_nonempty), nth, Context) ; GB_memcpy (Cp, workspace, (vlen + 1) * sizeof (int64_t), nth) ; } else { //---------------------------------------------------------------------- // non-atomic method //---------------------------------------------------------------------- // compute the row counts of A for each slice, one per thread. This // method is parallel, but not highly scalable. Each thread requires // int64 workspace of size vlen, but no atomics are required. The // resulting C matrix is not jumbled, so this can save work if C needs // to be unjumbled later. 
ASSERT (nworkspaces == nthreads) ; const int64_t *restrict Ap = A->p ; const int64_t *restrict Ah = A->h ; const int64_t *restrict Ai = A->i ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (tid = 0 ; tid < nthreads ; tid++) { // get the row counts for this slice, of size A->vlen int64_t *restrict workspace = Workspaces [tid] ; memset (workspace, 0, (vlen + 1) * sizeof (int64_t)) ; for (int64_t k = A_slice [tid] ; k < A_slice [tid+1] ; k++) { // iterate over the entries in A(:,j) int64_t j = GBH (Ah, k) ; int64_t pA_start = Ap [k] ; int64_t pA_end = Ap [k+1] ; for (int64_t pA = pA_start ; pA < pA_end ; pA++) { // count one more entry in C(i,:) for this slice int64_t i = Ai [pA] ; workspace [i]++ ; } } } // cumulative sum of the workspaces across the slices int64_t i ; #pragma omp parallel for num_threads(nth) schedule(static) for (i = 0 ; i < vlen ; i++) { int64_t s = 0 ; for (int tid = 0 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; int64_t c = workspace [i] ; workspace [i] = s ; s += c ; } Cp [i] = s ; } Cp [vlen] = 0 ; // compute the vector pointers for C GB_cumsum (Cp, vlen, &(C->nvec_nonempty), nth, Context) ; // add Cp back to all Workspaces #pragma omp parallel for num_threads(nth) schedule(static) for (i = 0 ; i < vlen ; i++) { int64_t s = Cp [i] ; int64_t *restrict workspace = Workspaces [0] ; workspace [i] = s ; for (int tid = 1 ; tid < nthreads ; tid++) { int64_t *restrict workspace = Workspaces [tid] ; workspace [i] += s ; } } } C->magic = GB_MAGIC ; //========================================================================== // phase2: transpose A into C //========================================================================== // transpose both the pattern and the values if (op == NULL) { // do not apply an operator; optional typecast to C->type GB_transpose_ix (C, A, Workspaces, A_slice, nworkspaces, nthreads) ; } else { // apply an operator, C has type op->ztype GB_transpose_op (C, C_code_iso, op, scalar, binop_bind1st, A, Workspaces, A_slice, nworkspaces, nthreads) ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORKSPACE ; ASSERT_MATRIX_OK (C, "C transpose of A", GB0) ; ASSERT (C->h == NULL) ; return (GrB_SUCCESS) ; }
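The atomic branch above relies on GB_ATOMIC_UPDATE to make the shared histogram safe. Here is a stand-alone sketch of the same counting pattern with a plain OpenMP atomic in place of that macro; this is an assumed equivalent for illustration, not the macro's actual definition, and the array contents are made up.

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t Ai[] = {0, 2, 1, 2, 0, 0}; // row indices of 6 entries
  int64_t anz = 6, vlen = 3;
  int64_t workspace[4] = {0, 0, 0, 0};

  #pragma omp parallel for schedule(static)
  for (int64_t p = 0; p < anz; p++) {
    // all threads share one histogram, so each increment must be atomic
    #pragma omp atomic update
    workspace[Ai[p]]++;
  }

  for (int64_t i = 0; i < vlen; i++)
    printf("count[%lld] = %lld\n", (long long) i, (long long) workspace[i]);
  return 0;
}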
GB_unop__log_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log_fc32_fc32 // op(A') function: GB_unop_tran__log_fc32_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = clogf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = clogf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = clogf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log_fc32_fc32 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log_fc32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log_fc32_fc32 // op(A') function: GB_unop_tran__log_fc32_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = clogf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = clogf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = clogf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log_fc32_fc32 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log_fc32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log_fc32_fc32 // op(A') function: GB_unop_tran__log_fc32_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = clogf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = clogf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = clogf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log_fc32_fc32 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = clogf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log_fc32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
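A stand-alone miniature of the two apply loops above may help; the array contents and sizes are made up for illustration. It applies clogf to every entry of a complex array, honoring an optional bitmap that marks which positions actually hold entries, just as the Ab test does in the kernel.

#include <complex.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  float complex Ax[4] = {1.0f, 2.0f * I, -1.0f, 4.0f};
  float complex Cx[4];
  const int8_t Ab[4] = {1, 1, 1, 0}; // stand-in for A->b: position 3 holds no entry

  #pragma omp parallel for schedule(static)
  for (int p = 0; p < 4; p++) {
    if (!Ab[p]) continue; // bitmap case: skip positions with no entry
    Cx[p] = clogf(Ax[p]); // the kernel's unary op
  }

  // e.g. Cx[2] = clogf(-1) = 0 + pi*i
  for (int p = 0; p < 4; p++)
    if (Ab[p]) printf("Cx[%d] = %g + %gi\n", p, crealf(Cx[p]), cimagf(Cx[p]));
  return 0;
}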
Efficient_RANSAC.h
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // // $URL$ // $Id$ // SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #include <CGAL/license/Shape_detection.h> #include <CGAL/Random.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h> // for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> #include <functional> // boost -------------- #include <CGAL/boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- namespace CGAL { namespace Shape_detection { /*! \ingroup PkgShapeDetectionRANSAC \brief Shape detection algorithm based on the RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits must be a model of `EfficientRANSACTraits`. */ template <class Traits> class Efficient_RANSAC { public: /// \cond SKIP_IN_MANUAL struct Filter_unassigned_points { Filter_unassigned_points() : m_shape_index(dummy) {} Filter_unassigned_points(const std::vector<int> &shapeIndex) : m_shape_index(shapeIndex) {} bool operator()(std::size_t x) { if (x < m_shape_index.size()) return m_shape_index[x] == -1; else return true; // to prevent infinite incrementing } const std::vector<int>& m_shape_index; std::vector<int> dummy; }; typedef boost::filter_iterator<Filter_unassigned_points, boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator; ///< iterator for indices of points. /// \endcond /// \name Types /// @{ /// \cond SKIP_IN_MANUAL typedef typename Traits::Input_range::iterator Input_iterator; typedef typename Traits::FT FT; ///< number type. typedef typename Traits::Point_3 Point; ///< point type. typedef typename Traits::Vector_3 Vector; ///< vector type. /// \endcond typedef typename Traits::Input_range Input_range; ///< Model of the concept `Range` with random access iterators, providing input points and normals /// through the following two property maps. typedef typename Traits::Point_map Point_map; ///< Property map to access the location of an input point. typedef typename Traits::Normal_map Normal_map; ///< Property map to access the unoriented normal of an input point. typedef Shape_base<Traits> Shape; ///< Shape type. typedef Plane<Traits> Plane_shape; ///< %Plane shape type. #ifdef DOXYGEN_RUNNING typedef unspecified_type Shape_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`. typedef unspecified_type Plane_range; ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`. 
#else struct Shape_range : public Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base; Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; struct Plane_range : public Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> { typedef Iterator_range< typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base; Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > extracted_shapes) : Base(make_range(extracted_shapes->begin(), extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {} private: boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > m_extracted_shapes; // keeps a reference to the shape vector }; #endif #ifdef DOXYGEN_RUNNING typedef unspecified_type Point_index_range; ///< `Iterator_range` with a bidirectional iterator with value type `std::size_t` /// as indices into the input data that has not been assigned to a shape. /// As this range class has no `size()` method, the method /// `Efficient_RANSAC::number_of_unassigned_points()` is provided. #else typedef Iterator_range<Point_index_iterator> Point_index_range; #endif /// @} /// \name Parameters /// @{ /*! Parameters for the shape detection algorithm. They are explained in detail in Section \ref Shape_detection_RANSACParameters of the User Manual. */ struct Parameters { Parameters() : probability((FT) 0.01) , min_points((std::numeric_limits<std::size_t>::max)()) , epsilon(-1) , normal_threshold((FT) 0.9) , cluster_epsilon(-1) {} /*! Probability to control search endurance. %Default value is 0.05. A lower probability provides a higher reliability and determinism at the cost of longer running time due to a higher search endurance. It must belong to the interval [0, 1]. */ FT probability; /*! Minimum number of points in a shape. %Default value is 1% of total number of input points. It must belong to the interval [0, +inf). */ std::size_t min_points; /*! Maximum acceptable Euclidean distance between a point and a shape. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT epsilon; /*! Maximum threshold on the dot product between the estimated shape's normal and the point's normal, that is the cosine of the angle (cos(25°) = 0.9). %Default value is 0.9 (around 25 degrees). It must belong to the interval [0, 1]. */ FT normal_threshold; /*! Maximum acceptable Euclidean distance between points, which are assumed to be neighbors. %Default value is 1% of the bounding box diagonal. It must belong to the interval [0, +inf). */ FT cluster_epsilon; }; /// @} private: typedef internal::Octree<internal::DirectPointAccessor<Traits> > Direct_octree; typedef internal::Octree<internal::IndexedPointAccessor<Traits> > Indexed_octree; //--------------------------------------------typedef // Creates a function pointer for instancing shape instances. template <class ShapeT> static Shape *factory() { return new ShapeT; } public: /// \name Initialization /// @{ /*! Constructs an empty shape detection object. 
*/ Efficient_RANSAC(Traits t = Traits()) : m_traits(t) , m_direct_octrees(nullptr) , m_global_octree(nullptr) , m_num_subsets(0) , m_num_available_points(0) , m_num_total_points(0) , m_valid_iterators(false) {} /*! Releases all memory allocated by this instance including shapes. */ ~Efficient_RANSAC() { clear(); } /*! Retrieves the traits class. */ const Traits& traits() const { return m_traits; } /*! Retrieves the point property map. */ const Point_map& point_map() const { return m_point_pmap; } /*! Retrieves the normal property map. */ const Normal_map& normal() const { return m_normal_pmap; } Input_iterator input_iterator_first() const { return m_input_iterator_first; } Input_iterator input_iterator_beyond() const { return m_input_iterator_beyond; } /*! Sets the input data. The range must stay valid until the detection has been performed and the access to the results is no longer required. The data in the input is reordered by the methods `detect()` and `preprocess()`. This function first calls `clear()`. */ void set_input( Input_range& input_range, ///< Range of input data. Point_map point_map = Point_map(), ///< Property map to access the position of an input point. Normal_map normal_map = Normal_map() ///< Property map to access the normal of an input point. ) { m_point_pmap = point_map; m_normal_pmap = normal_map; m_input_iterator_first = input_range.begin(); m_input_iterator_beyond = input_range.end(); clear(); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points = std::distance( m_input_iterator_first, m_input_iterator_beyond); m_valid_iterators = true; } /*! Registers the shape type `ShapeType` in the detection engine that must inherit from `Shape_base`. For example, for registering a plane as detectable shape, you should call `ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note that if your call is within a template, you should add the `template` keyword just before `add_shape_factory`: `ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`. */ template <class Shape_type> void add_shape_factory() { m_shape_factories.push_back(factory<Shape_type>); } /*! Constructs internal data structures required for the shape detection. These structures only depend on the input data, i.e. the points and normal vectors. This method is called by `detect()`, if it was not called before by the user. */ bool preprocess() { if (m_num_total_points == 0) return false; // Generation of subsets m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t) std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2); // SUBSET GENERATION -> // approach with increasing subset sizes -> replace with octree later on Input_iterator last = m_input_iterator_beyond - 1; std::size_t remainingPoints = m_num_total_points; m_available_octree_sizes.resize(m_num_subsets); m_direct_octrees = new Direct_octree *[m_num_subsets]; for (int s = int(m_num_subsets) - 1;s >= 0;--s) { std::size_t subsetSize = remainingPoints; std::vector<std::size_t> indices(subsetSize); if (s) { subsetSize >>= 1; for (std::size_t i = 0;i<subsetSize;i++) { std::size_t index = get_default_random()(2); index = index + (i<<1); index = (index >= remainingPoints) ? 
remainingPoints - 1 : index; indices[i] = index; } // move points to the end of the point vector std::size_t j = subsetSize; do { j--; typename std::iterator_traits<Input_iterator>::value_type tmp = (*last); *last = m_input_iterator_first[indices[std::size_t(j)]]; m_input_iterator_first[indices[std::size_t(j)]] = tmp; last--; } while (j > 0); m_direct_octrees[s] = new Direct_octree( m_traits, last + 1, last + subsetSize + 1, m_point_pmap, m_normal_pmap, remainingPoints - subsetSize); } else m_direct_octrees[0] = new Direct_octree( m_traits, m_input_iterator_first, m_input_iterator_first + (subsetSize), m_point_pmap, m_normal_pmap, 0); m_available_octree_sizes[s] = subsetSize; m_direct_octrees[s]->createTree(m_options.cluster_epsilon); remainingPoints -= subsetSize; } m_global_octree = new Indexed_octree( m_traits, m_input_iterator_first, m_input_iterator_beyond, m_point_pmap, m_normal_pmap); m_global_octree->createTree(m_options.cluster_epsilon); return true; } /// @} /// \name Memory Management /// @{ /*! Removes all shape types registered for detection. */ void clear_shape_factories() { m_shape_factories.clear(); } /*! Frees memory allocated for the internal search structures but keeps the detected shapes. It invalidates the range retrieved using `unassigned_points()`. */ void clear_octrees() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; if (m_global_octree) { delete m_global_octree; m_global_octree = nullptr; } if (m_direct_octrees) { for (std::size_t i = 0;i<m_num_subsets;i++) delete m_direct_octrees[i]; delete [] m_direct_octrees; m_direct_octrees = nullptr; } m_num_subsets = 0; } /*! Calls `clear_octrees()` and removes all detected shapes. All internal structures are cleaned, including formerly detected shapes. Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()` are invalidated. */ void clear() { // If there is no data yet, there are no data structures. if (!m_valid_iterators) return; std::vector<int>().swap(m_shape_index); m_extracted_shapes = boost::make_shared<std::vector<boost::shared_ptr<Shape> > >(); m_num_available_points = m_num_total_points; clear_octrees(); clear_shape_factories(); } /// @} /// \name Detection /// @{ /*! Performs the shape detection. Shape types considered during the detection are those registered using `add_shape_factory()`. \param options parameters for shape detection \param callback can be omitted if the algorithm should be run without any callback. It is called regularly when the algorithm is running: the current advancement (between 0.0 and 1.0) is passed as parameter. If it returns `true`, then the algorithm continues its execution normally; if it returns `false`, the algorithm is stopped. Note that this interruption may leave the class in an invalid state. \return `true` if shape types have been registered and input data has been set. Otherwise, `false` is returned. 
  /// \name Memory Management
  /// @{
  /*!
    Removes all shape types registered for detection.
  */
  void clear_shape_factories() {
    m_shape_factories.clear();
  }

  /*!
    Frees memory allocated for the internal search structures but keeps
    the detected shapes. It invalidates the range retrieved using
    `indices_of_unassigned_points()`.
  */
  void clear_octrees() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    if (m_global_octree) {
      delete m_global_octree;
      m_global_octree = nullptr;
    }

    if (m_direct_octrees) {
      for (std::size_t i = 0; i < m_num_subsets; i++)
        delete m_direct_octrees[i];
      delete [] m_direct_octrees;
      m_direct_octrees = nullptr;
    }

    m_num_subsets = 0;
  }

  /*!
    Calls `clear_octrees()` and removes all detected shapes.
    All internal structures are cleaned, including formerly detected shapes.
    Thus iterators and ranges retrieved through `shapes()`, `planes()` and
    `indices_of_unassigned_points()` are invalidated.
  */
  void clear() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    std::vector<int>().swap(m_shape_index);

    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();

    m_num_available_points = m_num_total_points;

    clear_octrees();

    clear_shape_factories();
  }

  /// @}

  /// \name Detection
  /// @{

  /*!
    Performs the shape detection. Shape types considered during the
    detection are those registered using `add_shape_factory()`.

    \param options parameters for shape detection

    \param callback can be omitted if the algorithm should be run without
    any callback. It is called regularly when the algorithm is running: the
    current advancement (between 0.0 and 1.0) is passed as parameter. If it
    returns `true`, then the algorithm continues its execution normally; if
    it returns `false`, the algorithm is stopped. Note that this
    interruption may leave the class in an invalid state.

    \return `true` if shape types have been registered and input data has
    been set. Otherwise, `false` is returned.
  */
  bool detect(const Parameters& options = Parameters(),
              const std::function<bool(double)>& callback
                = std::function<bool(double)>())
  {
    m_options = options;

    // No shape types for detection or no points provided, exit
    if (m_shape_factories.size() == 0 ||
        (m_input_iterator_beyond - m_input_iterator_first) == 0)
      return false;

    if (m_num_subsets == 0 || m_global_octree == nullptr) {
      if (!preprocess())
        return false;
    }

    if (callback && !callback(0.))
      return false;

    // Reset data structures possibly used by a former search
    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
    m_num_available_points = m_num_total_points;

    for (std::size_t i = 0; i < m_num_subsets; i++) {
      m_available_octree_sizes[i] = m_direct_octrees[i]->size();
    }

    // Use the bounding box diagonal as reference for default values
    Bbox_3 bbox = m_global_octree->boundingBox();
    FT bbox_diagonal = (FT) CGAL::sqrt(
      (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
      + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
      + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));

    // If epsilon or cluster_epsilon have not been set by the user,
    // derive them from the bounding box diagonal
    m_options.epsilon = (m_options.epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.epsilon;

    m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;

    // If the minimum number of points has not been set,
    // default to 1% of the input (but never fewer than 10 points)
    m_options.min_points =
      (m_options.min_points == (std::numeric_limits<std::size_t>::max)())
      ? (std::size_t)((FT)0.01 * m_num_available_points)
      : m_options.min_points;
    m_options.min_points = (m_options.min_points < 10)
      ? 10 : m_options.min_points;
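    // Illustrative numbers for the defaulting above (just the arithmetic of
    // the preceding lines, not additional library behavior): for an input
    // whose bounding box diagonal is 10 units, epsilon and cluster_epsilon
    // both default to 0.1; with 50,000 input points, min_points defaults to
    // 500. The clamp to 10 only matters for inputs with fewer than 1,000
    // points.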
    // Initializing the shape index
    m_shape_index.assign(m_num_available_points, -1);

    if (m_options.min_points > m_num_available_points)
      return true;

    // List of all randomly drawn candidates
    // with the minimum number of points
    std::vector<Shape *> candidates;

    // Identifying the minimum number of samples
    m_required_samples = 0;
    for (std::size_t i = 0; i < m_shape_factories.size(); i++) {
      Shape *tmp = (Shape *) m_shape_factories[i]();
      m_required_samples = (std::max<std::size_t>)(m_required_samples,
                                                   tmp->minimum_sample_size());
      delete tmp;
    }

    std::size_t first_sample; // first sample for RANSAC

    FT best_expected = 0;

    // number of points that have been assigned to a shape
    std::size_t num_invalid = 0;

    std::size_t generated_candidates = 0;
    std::size_t failed_candidates = 0;
    std::size_t limit_failed_candidates = (std::max)(std::size_t(10000),
      std::size_t(m_input_iterator_beyond - m_input_iterator_first)
      / std::size_t(100));

    bool force_exit = false;
    bool keep_searching = true;

    do { // main loop
      best_expected = 0;

      if (keep_searching)
        do { // Search (remaining_points / min_points) shapes
             // (max 200 per iteration, min 1)
          std::size_t search_number = (std::min)(std::size_t(200),
            (std::max)(std::size_t((m_num_available_points - num_invalid)
                                   / double(m_options.min_points)),
                       std::size_t(1)));
          for (std::size_t nb = 0; nb < search_number; ++nb) {
            // Generate candidates
            // 1. pick a point p1 randomly among the available points
            std::set<std::size_t> indices;
            bool done = false;
            do {
              do
                first_sample = get_default_random()(
                  static_cast<unsigned int>(m_num_available_points));
              while (m_shape_index[first_sample] != -1);

              done = m_global_octree->drawSamplesFromCellContainingPoint(
                get(m_point_pmap,
                    *(m_input_iterator_first + first_sample)),
                select_random_octree_level(),
                indices,
                m_shape_index,
                m_required_samples);

              if (callback && !callback(num_invalid
                                        / double(m_num_total_points)))
                return false;
            } while (m_shape_index[first_sample] != -1 || !done);

            generated_candidates++;

            // add a candidate for each registered type of primitive
            for (typename std::vector<Shape *(*)()>::iterator it =
                   m_shape_factories.begin();
                 it != m_shape_factories.end(); it++) {
              if (callback && !callback(num_invalid
                                        / double(m_num_total_points)))
                return false;

              Shape *p = (Shape *) (*it)();
              // compute the primitive and check whether the candidate is valid
              p->compute(indices, m_input_iterator_first, m_traits,
                         m_point_pmap, m_normal_pmap,
                         m_options.epsilon, m_options.normal_threshold);

              if (p->is_valid()) {
                improve_bound(p, m_num_available_points - num_invalid, 1, 500);

                // evaluate the candidate
                if (p->max_bound() >= m_options.min_points && p->score() > 0) {
                  if (best_expected < p->expected_value())
                    best_expected = p->expected_value();
                  candidates.push_back(p);
                }
                else {
                  failed_candidates++;
                  delete p;
                }
              }
              else {
                failed_candidates++;
                delete p;
              }
            }
          }

          if (failed_candidates >= limit_failed_candidates) {
            force_exit = true;
          }

          keep_searching = (stop_probability(m_options.min_points,
            m_num_available_points - num_invalid,
            generated_candidates,
            m_global_octree->maxLevel())
              > m_options.probability);
        } while (!force_exit
          && stop_probability((std::size_t) best_expected,
                              m_num_available_points - num_invalid,
                              generated_candidates,
                              m_global_octree->maxLevel())
               > m_options.probability
          && keep_searching);
      // end of candidate generation
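      // A hedged numeric sketch of the stopping test above (it evaluates
      // stop_probability(), defined further below; illustration, not a
      // library guarantee): with N = 10,000 remaining points, octree depth
      // d = 8 and a minimal sample size of 3, a single draw hits a shape of
      // k = 500 points with probability roughly
      // k / (N * (d+1) * 2^(3-1)) = 500 / 360,000, about 0.0014. After
      // 2,000 generated candidates, the estimated probability of having
      // missed such a shape is (1 - 0.0014)^2000, about 0.06, so the search
      // continues until this value drops below m_options.probability.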
      if (force_exit) {
        break;
      }

      if (candidates.empty())
        continue;

      // Now get the best candidate in the current set of all candidates.
      // Note that the function sorts the candidates:
      // the best candidate is always the last element of the vector.
      Shape *best_candidate =
        get_best_candidate(candidates, m_num_available_points - num_invalid);

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      if (!best_candidate)
        continue;

      // If the search is done and the best candidate is too small, we are done.
      if (!keep_searching && best_candidate->m_score < m_options.min_points)
        break;

      best_candidate->m_indices.clear();

      best_candidate->m_score =
        m_global_octree->score(best_candidate,
                               m_shape_index,
                               FT(3) * m_options.epsilon,
                               m_options.normal_threshold);

      best_expected = static_cast<FT>(best_candidate->m_score);

      best_candidate->connected_component(best_candidate->m_indices,
                                          m_options.cluster_epsilon);

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      // check the score against min_points and clear out the candidates if too low
      if (best_candidate->indices_of_assigned_points().size() <
          m_options.min_points) {
        if (!(best_candidate->indices_of_assigned_points().empty()))
          for (std::size_t i = 0; i < candidates.size() - 1; i++) {
            if (best_candidate->is_same(candidates[i])) {
              delete candidates[i];
              candidates[i] = nullptr;
            }
          }

        candidates.back() = nullptr;
        delete best_candidate;
        best_candidate = nullptr;

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // Trimming the candidates list
        std::size_t empty = 0, occupied = 0;
        while (empty < candidates.size()) {
          while (empty < candidates.size() && candidates[empty]) empty++;

          if (empty >= candidates.size()) break;

          if (occupied < empty) occupied = empty + 1;

          while (occupied < candidates.size() && !candidates[occupied])
            occupied++;

          if (occupied >= candidates.size()) break;

          candidates[empty] = candidates[occupied];
          candidates[occupied] = nullptr;
          empty++;
          occupied++;
        }

        candidates.resize(empty);

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;
      }
      else if (stop_probability((std::size_t) best_candidate->expected_value(),
                                (m_num_available_points - num_invalid),
                                generated_candidates,
                                m_global_octree->maxLevel())
                 <= m_options.probability) {
        // Remove the candidate from the list
        candidates.back() = nullptr;

        // 1. add the best candidate to the final result
        m_extracted_shapes->push_back(
          boost::shared_ptr<Shape>(best_candidate));

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // 2. remove the points
        const std::vector<std::size_t>& indices_points_best_candidate =
          best_candidate->indices_of_assigned_points();

        // update the number of generated candidates to reflect the removal of points
        generated_candidates = std::size_t(std::pow(1.f
          - (indices_points_best_candidate.size()
             / float(m_num_available_points - num_invalid)), 3.f)
          * generated_candidates);
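        // The rescaling above discounts past sampling effort once points are
        // removed: a former draw remains a valid minimal sample only if all
        // of its points survive, which for a sample size of 3 (the exponent
        // hard-coded above) happens with probability (1 - |C|/N)^3.
        // Illustrative arithmetic: extracting a shape with |C| = 2,000 of
        // N = 10,000 remaining points keeps (1 - 0.2)^3 = 0.512 of the
        // counter, so 1,000 generated candidates become 512.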
        // 2.3 Remove the points from the subtrees
        for (std::size_t i = 0; i < indices_points_best_candidate.size(); i++) {
          m_shape_index[indices_points_best_candidate.at(i)] =
            int(m_extracted_shapes->size()) - 1;

          num_invalid++;

          for (std::size_t j = 0; j < m_num_subsets; j++) {
            if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
              std::size_t offset = m_direct_octrees[j]->offset();

              if (offset <= indices_points_best_candidate.at(i) &&
                  (indices_points_best_candidate.at(i) - offset)
                    < m_direct_octrees[j]->size()) {
                m_available_octree_sizes[j]--;
              }
            }
          }
        }

        failed_candidates = 0;
        best_expected = 0;

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        std::vector<std::size_t> subset_sizes(m_num_subsets);
        subset_sizes[0] = m_available_octree_sizes[0];
        for (std::size_t i = 1; i < m_num_subsets; i++) {
          subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
        }

        // 3. Remove the points common with the extracted primitive
        //    from the remaining candidates
        //#pragma omp parallel for
        best_expected = 0;
        for (std::size_t i = 0; i < candidates.size() - 1; i++) {
          if (candidates[i]) {
            candidates[i]->update_points(m_shape_index);
            candidates[i]->compute_bound(
              subset_sizes[candidates[i]->m_nb_subset_used - 1],
              m_num_available_points - num_invalid);

            if (candidates[i]->max_bound() < m_options.min_points) {
              delete candidates[i];
              candidates[i] = nullptr;
            }
            else {
              best_expected = (candidates[i]->expected_value() > best_expected)
                ? candidates[i]->expected_value() : best_expected;
            }
          }
        }

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        std::size_t start = 0, end = candidates.size() - 1;
        while (start < end) {
          while (candidates[start] && start < end) start++;
          while (!candidates[end] && start < end) end--;

          if (!candidates[start] && candidates[end] && start < end) {
            candidates[start] = candidates[end];
            candidates[end] = nullptr;
            start++;
            end--;
          }
        }

        if (candidates[end]) end++;

        candidates.resize(end);
      }
      else if (!keep_searching)
        ++generated_candidates;

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      keep_searching = (stop_probability(m_options.min_points,
        m_num_available_points - num_invalid,
        generated_candidates,
        m_global_octree->maxLevel())
          > m_options.probability);
    } while ((keep_searching
              && FT(m_num_available_points - num_invalid)
                   >= m_options.min_points)
             || best_expected >= m_options.min_points);

    // Clean up the remaining candidates.
    for (std::size_t i = 0; i < candidates.size(); i++)
      delete candidates[i];
    candidates.resize(0);

    m_num_available_points -= num_invalid;

    return true;
  }

  /// @}

  /// \name Access
  /// @{

  /*!
    Returns an `Iterator_range` with a bidirectional iterator with value
    type `boost::shared_ptr<Shape>` over the detected shapes in the order
    of detection. Depending on the chosen probability for the detection,
    the shapes are ordered with decreasing size.
  */
  Shape_range shapes() const {
    return Shape_range(m_extracted_shapes);
  }

  /*!
    Returns an `Iterator_range` with a bidirectional iterator with value
    type `boost::shared_ptr<Plane_shape>` over only the detected planes in
    the order of detection. Depending on the chosen probability for the
    detection, the planes are ordered with decreasing size.
  */
  Plane_range planes() const {
    boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes =
      boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();

    for (std::size_t i = 0; i < m_extracted_shapes->size(); ++i) {
      boost::shared_ptr<Plane_shape> pshape =
        boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]);

      // Ignore all shapes other than planes
      if (pshape != boost::shared_ptr<Plane_shape>())
        planes->push_back(pshape);
    }
    return Plane_range(planes);
  }
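  // A hedged sketch of consuming these ranges (it assumes the
  // `Efficient_ransac` typedef and `ransac` instance from the usage sketch
  // near set_input(), plus <iostream>):
  //
  // \code{.cpp}
  // Efficient_ransac::Shape_range shapes = ransac.shapes();
  // for (boost::shared_ptr<Efficient_ransac::Shape> shape : shapes) {
  //   // info() is provided by Shape_base and describes type and parameters.
  //   std::cout << shape->info() << " with "
  //             << shape->indices_of_assigned_points().size()
  //             << " assigned points" << std::endl;
  // }
  // \endcode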
  /*!
    Number of points not assigned to a shape.
  */
  std::size_t number_of_unassigned_points() const {
    return m_num_available_points;
  }

  /*!
    Returns an `Iterator_range` with a bidirectional iterator with value
    type `std::size_t` as indices into the input data that have not been
    assigned to a shape.
  */
  Point_index_range indices_of_unassigned_points() {
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
        fup,
        boost::counting_iterator<std::size_t,
          boost::use_default, std::ptrdiff_t>(0),
        boost::counting_iterator<std::size_t,
          boost::use_default, std::ptrdiff_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:
  int select_random_octree_level() {
    return (int) get_default_random()(
      static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
  }

  Shape* get_best_candidate(std::vector<Shape*>& candidates,
                            const std::size_t num_available_points) {
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
      improved = false;

      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(),
                comp);

      // refine the best one
      improve_bound(candidates.back(),
                    num_available_points, m_num_subsets,
                    m_options.min_points);

      int position_stop;

      // Take all those intersecting the best one, check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--) {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals do not overlap anymore

        if (candidates.at(position_stop)->max_bound()
              <= m_options.min_points)
          break; // the following candidates do not have enough points

        // If we reach this point, there is an overlap between the best
        // candidate and position_stop, so request a refined bound on
        // position_stop.
        improved |= improve_bound(candidates.at(position_stop),
                                  num_available_points,
                                  m_num_subsets,
                                  m_options.min_points);

        // test again after the refinement
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals do not overlap anymore
      }

      index_worse_candidate = position_stop;
    }

    return candidates.back();
  }
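  // improve_bound() below implements the lazy, progressive candidate
  // evaluation of the efficient RANSAC scheme: a candidate is scored
  // against one additional point subset at a time and the full score is
  // extrapolated into a confidence interval [min_bound(), max_bound()]
  // via Shape_base::compute_bound(). Illustrative arithmetic under that
  // scheme (a sketch, not a library guarantee): 120 inliers found on
  // subsets holding 1/8 of the points extrapolate to a full score around
  // 960, and each further subset narrows the interval until
  // get_best_candidate() can order overlapping candidates.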
  bool improve_bound(Shape *candidate,
                     std::size_t num_available_points,
                     std::size_t max_subset,
                     std::size_t min_points) {
    if (candidate->m_nb_subset_used >= max_subset)
      return false;

    if (candidate->m_nb_subset_used >= m_num_subsets)
      return false;

    candidate->m_nb_subset_used =
      (candidate->m_nb_subset_used >= m_num_subsets)
        ? m_num_subsets - 1 : candidate->m_nb_subset_used;

    // add another subset and recompute the lower and upper bounds;
    // the next subset to include is given by m_nb_subset_used
    std::size_t num_points_evaluated = 0;
    for (std::size_t i = 0; i < candidate->m_nb_subset_used; i++)
      num_points_evaluated += m_available_octree_sizes[i];

    // the score of the new subset is needed, as well as the sum of
    // the scores of the previously considered subsets
    std::size_t new_score = 0;
    std::size_t new_sampled_points = 0;

    do {
      new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
        candidate,
        m_shape_index,
        m_options.epsilon,
        m_options.normal_threshold);

      candidate->m_score += new_score;

      num_points_evaluated +=
        m_available_octree_sizes[candidate->m_nb_subset_used];
      new_sampled_points +=
        m_available_octree_sizes[candidate->m_nb_subset_used];

      candidate->m_nb_subset_used++;
    } while (new_sampled_points < min_points &&
             candidate->m_nb_subset_used < m_num_subsets);

    candidate->m_score = candidate->m_indices.size();

    candidate->compute_bound(num_points_evaluated, num_available_points);

    return true;
  }

  // Estimated probability of having missed a shape of largest_candidate
  // points after num_candidates samples, following the localized sampling
  // analysis of \cgalCite{schnabel2007efficient}: a single minimal sample
  // hits such a shape with probability about
  // largest_candidate / (num_pts * (octree_depth + 1) * 2^(s - 1)),
  // where s is the minimal sample size.
  inline FT stop_probability(std::size_t largest_candidate,
                             std::size_t num_pts,
                             std::size_t num_candidates,
                             std::size_t octree_depth) const {
    return (std::min<FT>)(std::pow(FT(1) - FT(largest_candidate)
      / (FT(num_pts) * FT(octree_depth + 1)
         * FT(1 << (m_required_samples - 1))),
      int(num_candidates)), FT(1));
  }

private:
  Parameters m_options;

  // Traits class.
  Traits m_traits;

  // Octrees built on the input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;
  Indexed_octree *m_global_octree;
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;

  // maps an index into the points to the assigned extracted primitive
  std::vector<int> m_shape_index;
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;
  std::size_t m_required_samples;

  // gives the index of the subset of point i
  std::vector<int> m_index_subsets;

  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes;

  std::vector<Shape *(*)()> m_shape_factories;

  // iterators of the input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};

} // namespace Shape_detection
} // namespace CGAL

#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
if (!keep_searching && best_candidate->m_score < m_options.min_points) break; if (!best_candidate) continue; best_candidate->m_indices.clear(); best_candidate->m_score = m_global_octree->score(best_candidate, m_shape_index, FT(3) * m_options.epsilon, m_options.normal_threshold); best_expected = static_cast<FT>(best_candidate->m_score); best_candidate->connected_component(best_candidate->m_indices, m_options.cluster_epsilon); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // check score against min_points and clear out candidates if too low if (best_candidate->indices_of_assigned_points().size() < m_options.min_points) { if (!(best_candidate->indices_of_assigned_points().empty())) for (std::size_t i = 0;i < candidates.size() - 1;i++) { if (best_candidate->is_same(candidates[i])) { delete candidates[i]; candidates[i] = nullptr; } } candidates.back() = nullptr; delete best_candidate; best_candidate = nullptr; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; // Trimming candidates list std::size_t empty = 0, occupied = 0; while (empty < candidates.size()) { while (empty < candidates.size() && candidates[empty]) empty++; if (empty >= candidates.size()) break; if (occupied < empty) occupied = empty + 1; while (occupied < candidates.size() && !candidates[occupied]) occupied++; if (occupied >= candidates.size()) break; candidates[empty] = candidates[occupied]; candidates[occupied] = nullptr; empty++; occupied++; } candidates.resize(empty); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; } else if (stop_probability((std::size_t) best_candidate->expected_value(), (m_num_available_points - num_invalid), generated_candidates, m_global_octree->maxLevel()) <= m_options.probability) { // Remove candidate from list candidates.back() = nullptr; //1. add best candidate to final result. m_extracted_shapes->push_back( boost::shared_ptr<Shape>(best_candidate)); if (callback && !callback(num_invalid / double(m_num_total_points))) return false; //2. remove the points const std::vector<std::size_t> &indices_points_best_candidate = best_candidate->indices_of_assigned_points(); // update generated candidates to reflect removal of points generated_candidates = std::size_t(std::pow (1.f - (indices_points_best_candidate.size() / float(m_num_available_points - num_invalid)), 3.f) * generated_candidates); //2.3 Remove the points from the subtrees for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) { m_shape_index[indices_points_best_candidate.at(i)] = int(m_extracted_shapes->size()) - 1; num_invalid++; for (std::size_t j = 0;j<m_num_subsets;j++) { if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) { std::size_t offset = m_direct_octrees[j]->offset(); if (offset <= indices_points_best_candidate.at(i) && (indices_points_best_candidate.at(i) - offset) < m_direct_octrees[j]->size()) { m_available_octree_sizes[j]--; } } } } failed_candidates = 0; best_expected = 0; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::vector<std::size_t> subset_sizes(m_num_subsets); subset_sizes[0] = m_available_octree_sizes[0]; for (std::size_t i = 1;i<m_num_subsets;i++) { subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i]; } //3. 
Remove points from candidates common with extracted primitive //#pragma omp parallel for best_expected = 0; for (std::size_t i=0;i< candidates.size()-1;i++) { if (candidates[i]) { candidates[i]->update_points(m_shape_index); candidates[i]->compute_bound( subset_sizes[candidates[i]->m_nb_subset_used - 1], m_num_available_points - num_invalid); if (candidates[i]->max_bound() < m_options.min_points) { delete candidates[i]; candidates[i] = nullptr; } else { best_expected = (candidates[i]->expected_value() > best_expected) ? candidates[i]->expected_value() : best_expected; } } } if (callback && !callback(num_invalid / double(m_num_total_points))) return false; std::size_t start = 0, end = candidates.size() - 1; while (start < end) { while (candidates[start] && start < end) start++; while (!candidates[end] && start < end) end--; if (!candidates[start] && candidates[end] && start < end) { candidates[start] = candidates[end]; candidates[end] = nullptr; start++; end--; } } if (candidates[end]) end++; candidates.resize(end); } else if (!keep_searching) ++ generated_candidates; if (callback && !callback(num_invalid / double(m_num_total_points))) return false; keep_searching = (stop_probability(m_options.min_points, m_num_available_points - num_invalid, generated_candidates, m_global_octree->maxLevel()) > m_options.probability); } while((keep_searching && FT(m_num_available_points - num_invalid) >= m_options.min_points) || best_expected >= m_options.min_points); // Clean up remaining candidates. for (std::size_t i = 0;i<candidates.size();i++) delete candidates[i]; candidates.resize(0); m_num_available_points -= num_invalid; return true; } /// @} /// \name Access /// @{ /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Shape>` over the detected shapes in the order of detection. Depending on the chosen probability for the detection, the shapes are ordered with decreasing size. */ Shape_range shapes() const { return Shape_range(m_extracted_shapes); } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `boost::shared_ptr<Plane_shape>` over only the detected planes in the order of detection. Depending on the chosen probability for the detection, the planes are ordered with decreasing size. */ Plane_range planes() const { boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes = boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >(); for (std::size_t i = 0; i < m_extracted_shapes->size(); ++ i) { boost::shared_ptr<Plane_shape> pshape = boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]); // Ignore all shapes other than plane if (pshape != boost::shared_ptr<Plane_shape>()) planes->push_back (pshape); } return Plane_range(planes); } /*! Number of points not assigned to a shape. */ std::size_t number_of_unassigned_points() const { return m_num_available_points; } /*! Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t` as indices into the input data that has not been assigned to a shape. 
*/
  Point_index_range indices_of_unassigned_points() {
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
        fup,
        boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(0),
        boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:
  int select_random_octree_level() {
    return (int) get_default_random()(
      static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
  }

  Shape* get_best_candidate(std::vector<Shape* >& candidates,
                            const std::size_t num_available_points) {
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
      improved = false;
      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(), comp);

      // refine the best one
      improve_bound(candidates.back(), num_available_points,
                    m_num_subsets, m_options.min_points);

      int position_stop;

      // Take all candidates intersecting the best one and check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--) {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals no longer overlap
        if (candidates.at(position_stop)->max_bound() <= m_options.min_points)
          break; // the next candidate does not have enough points

        // If we reach this point, the interval of the best candidate
        // overlaps the one at position_stop, so refine the bound
        // at position_stop.
        improved |= improve_bound(candidates.at(position_stop),
                                  num_available_points, m_num_subsets,
                                  m_options.min_points);

        // test again after refinement
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break; // the intervals no longer overlap
      }

      index_worse_candidate = position_stop;
    }

    return candidates.back();
  }

  bool improve_bound(Shape *candidate, std::size_t num_available_points,
                     std::size_t max_subset, std::size_t min_points) {
    if (candidate->m_nb_subset_used >= max_subset)
      return false;

    if (candidate->m_nb_subset_used >= m_num_subsets)
      return false;

    candidate->m_nb_subset_used =
      (candidate->m_nb_subset_used >= m_num_subsets) ?
m_num_subsets - 1 : candidate->m_nb_subset_used; //what it does is add another subset and recompute lower and upper bound //the next subset to include is provided by m_nb_subset_used std::size_t num_points_evaluated = 0; for (std::size_t i=0;i<candidate->m_nb_subset_used;i++) num_points_evaluated += m_available_octree_sizes[i]; // need score of new subset as well as sum of // the score of the previous considered subset std::size_t new_score = 0; std::size_t new_sampled_points = 0; do { new_score = m_direct_octrees[candidate->m_nb_subset_used]->score( candidate, m_shape_index, m_options.epsilon, m_options.normal_threshold); candidate->m_score += new_score; num_points_evaluated += m_available_octree_sizes[candidate->m_nb_subset_used]; new_sampled_points += m_available_octree_sizes[candidate->m_nb_subset_used]; candidate->m_nb_subset_used++; } while (new_sampled_points < min_points && candidate->m_nb_subset_used < m_num_subsets); candidate->m_score = candidate->m_indices.size(); candidate->compute_bound(num_points_evaluated, num_available_points); return true; } inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const { return (std::min<FT>)(std::pow(FT(1) - FT(largest_candidate) / (FT(num_pts) * FT(octree_depth+1) * FT(1 << (m_required_samples - 1))), int(num_candidates)), FT(1)); } private: Parameters m_options; // Traits class. Traits m_traits; // Octrees build on input data for quick shape evaluation and // sample selection within an octree cell. Direct_octree **m_direct_octrees; Indexed_octree *m_global_octree; std::vector<std::size_t> m_available_octree_sizes; std::size_t m_num_subsets; // maps index into points to assigned extracted primitive std::vector<int> m_shape_index; std::size_t m_num_available_points; std::size_t m_num_total_points; std::size_t m_required_samples; //give the index of the subset of point i std::vector<int> m_index_subsets; boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes; std::vector<Shape *(*)()> m_shape_factories; // iterators of input data bool m_valid_iterators; Input_iterator m_input_iterator_first, m_input_iterator_beyond; Point_map m_point_pmap; Normal_map m_normal_pmap; }; } } #endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
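The detection loop above keeps drawing candidates until stop_probability() falls below the user-chosen probability: the value it computes is the chance that a shape covering a given number of points was missed by every minimal sample drawn so far, following the efficient-RANSAC analysis of Schnabel et al. A minimal C sketch of just that formula; the function name is illustrative and only the expression itself is taken from the code above:

#include <math.h>
#include <stddef.h>

/* Probability that a shape covering `largest` of `num_pts` points was
   missed by all `num_candidates` minimal samples, when samples are drawn
   from an octree of depth `depth` and each candidate needs
   `required_samples` points (mirrors stop_probability above;
   required_samples is assumed >= 1). */
static double miss_probability(size_t largest, size_t num_pts,
                               size_t num_candidates, size_t depth,
                               size_t required_samples)
{
    double p_hit = (double)largest /
                   ((double)num_pts * (double)(depth + 1) *
                    (double)(1u << (required_samples - 1)));
    double p = pow(1.0 - p_hit, (double)num_candidates);
    return p < 1.0 ? p : 1.0;
}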
master.c
//=====================================================================
// MAIN FUNCTION
//=====================================================================

void master(fp timeinst, fp *initvalu, fp *parameter, fp *finavalu, int mode) {

  //=====================================================================
  // VARIABLES
  //=====================================================================

  // counters
  int i;

  // intermediate output on host
  fp JCaDyad;
  fp JCaSL;
  fp JCaCyt;

  // offset pointers
  int initvalu_offset_batch; //
  int initvalu_offset_ecc;   // 46 points
  int parameter_offset_ecc;
  int initvalu_offset_Dyad;  // 15 points
  int parameter_offset_Dyad;
  int initvalu_offset_SL;    // 15 points
  int parameter_offset_SL;
  int initvalu_offset_Cyt;   // 15 points
  int parameter_offset_Cyt;

  // module parameters
  fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaSL;   // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaCyt;  // from ECC model, *** Converting from [mM] to [uM] ***

  // thread counters
  int th_id, nthreads;
  int th_count[4];
  int temp;

  //=====================================================================
  // KERNELS FOR 1 WORKLOAD - PARALLEL
  //=====================================================================

  nthreads = omp_get_max_threads();

  if (mode == 0) {

    // partition workload between threads: round-robin over the 4 pieces
    temp = 0;
    for (i = 0; i < 4; i++) {      // do for all 4 pieces of work
      if (temp >= nthreads) {      // limit according to number of threads
        temp = 0;
      }
      th_count[i] = temp;          // assign thread to piece of work
      temp = temp + 1;
    }

    // run pieces of work in parallel; th_count is indexed 0..3 and each
    // thread queries its own id inside the region
    #pragma omp parallel private(th_id)
    {
      th_id = omp_get_thread_num();

      if (th_id == th_count[0]) { // ecc function
        initvalu_offset_ecc = 0; // 46 points
        parameter_offset_ecc = 0;
        ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);
      }

      if (th_id == th_count[1]) { // cam function for Dyad
        initvalu_offset_Dyad = 46; // 15 points
        parameter_offset_Dyad = 1;
        CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);
      }

      if (th_id == th_count[2]) { // cam function for SL
        initvalu_offset_SL = 61; // 15 points
        parameter_offset_SL = 6;
        CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);
      }

      if (th_id == th_count[3]) { // cam function for Cyt
        initvalu_offset_Cyt = 76; // 15 points
        parameter_offset_Cyt = 11;
        CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
      }
    }
  }

  //=====================================================================
  // KERNELS FOR MANY WORKLOAD - SERIAL
  //=====================================================================

  else {

    // ecc function
    initvalu_offset_ecc = 0; // 46 points
    parameter_offset_ecc = 0;
    ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);

    // cam function for Dyad
    initvalu_offset_Dyad = 46; // 15 points
    parameter_offset_Dyad = 1;
    CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);

    // cam function for SL
    initvalu_offset_SL = 61; // 15 points
    parameter_offset_SL = 6;
    CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);

    // cam function for Cyt
    initvalu_offset_Cyt = 76; // 15 points
    parameter_offset_Cyt = 11;
    CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
  }

  //=====================================================================
  // FINAL KERNEL
  //=====================================================================

  // final adjustments
  fin(initvalu, initvalu_offset_ecc, initvalu_offset_Dyad, initvalu_offset_SL, initvalu_offset_Cyt, parameter, finavalu, JCaDyad, JCaSL, JCaCyt);

  //=====================================================================
  // COMPENSATION FOR NANs and INFs
  //=====================================================================

  // make sure function does not return NANs and INFs
  for (i = 0; i < EQUATIONS; i++) {
    if (isnan(finavalu[i])) {
      finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
    } else if (isinf(finavalu[i])) {
      finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
    }
  }
}
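The closing loop in master() clamps non-finite rates of change with two separate tests; C99's isfinite() covers NaN and infinity in one. A small sketch of the equivalent form (sanitize_rates is an illustrative name; fp is the benchmark's floating-point typedef):

#include <math.h>

/* Same effect as the NaN/INF compensation loop above: isfinite() is
   false exactly for NaN and +/-infinity (C99 <math.h>). */
static void sanitize_rates(fp *finavalu, int n) {
  int i;
  for (i = 0; i < n; i++) {
    if (!isfinite(finavalu[i])) {
      finavalu[i] = 0.0001; /* same fallback rate of change as above */
    }
  }
}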
//=====================================================================
// MAIN FUNCTION
//=====================================================================

void master(fp timeinst, fp *initvalu, fp *parameter, fp *finavalu, int mode) {

  //=====================================================================
  // VARIABLES
  //=====================================================================

  // counters
  int i;

  // intermediate output on host
  fp JCaDyad;
  fp JCaSL;
  fp JCaCyt;

  // offset pointers
  int initvalu_offset_batch; //
  int initvalu_offset_ecc;   // 46 points
  int parameter_offset_ecc;
  int initvalu_offset_Dyad;  // 15 points
  int parameter_offset_Dyad;
  int initvalu_offset_SL;    // 15 points
  int parameter_offset_SL;
  int initvalu_offset_Cyt;   // 15 points
  int parameter_offset_Cyt;

  // module parameters
  fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaSL;   // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaCyt;  // from ECC model, *** Converting from [mM] to [uM] ***

  // thread counters
  int th_id, nthreads;
  int th_count[4];
  int temp;

  //=====================================================================
  // KERNELS FOR 1 WORKLOAD - PARALLEL
  //=====================================================================

  nthreads = omp_get_max_threads();

  if (mode == 0) {

    // partition workload between threads: round-robin over the 4 pieces
    temp = 0;
    for (i = 0; i < 4; i++) {      // do for all 4 pieces of work
      if (temp >= nthreads) {      // limit according to number of threads
        temp = 0;
      }
      th_count[i] = temp;          // assign thread to piece of work
      temp = temp + 1;
    }

    // run pieces of work in parallel (parallel region stripped in this
    // variant; th_count is indexed 0..3)
    th_id = omp_get_thread_num();

    if (th_id == th_count[0]) { // ecc function
      initvalu_offset_ecc = 0; // 46 points
      parameter_offset_ecc = 0;
      ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);
    }

    if (th_id == th_count[1]) { // cam function for Dyad
      initvalu_offset_Dyad = 46; // 15 points
      parameter_offset_Dyad = 1;
      CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
      JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);
    }

    if (th_id == th_count[2]) { // cam function for SL
      initvalu_offset_SL = 61; // 15 points
      parameter_offset_SL = 6;
      CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
      JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);
    }

    if (th_id == th_count[3]) { // cam function for Cyt
      initvalu_offset_Cyt = 76; // 15 points
      parameter_offset_Cyt = 11;
      CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
      JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
    }
  }

  //=====================================================================
  // KERNELS FOR MANY WORKLOAD - SERIAL
  //=====================================================================

  else {

    // ecc function
    initvalu_offset_ecc = 0; // 46 points
    parameter_offset_ecc = 0;
    ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);

    // cam function for Dyad
    initvalu_offset_Dyad = 46; // 15 points
    parameter_offset_Dyad = 1;
    CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);

    // cam function for SL
    initvalu_offset_SL = 61; // 15 points
    parameter_offset_SL = 6;
    CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);

    // cam function for Cyt
    initvalu_offset_Cyt = 76; // 15 points
    parameter_offset_Cyt = 11;
    CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
  }

  //=====================================================================
  // FINAL KERNEL
  //=====================================================================

  // final adjustments
  fin(initvalu, initvalu_offset_ecc, initvalu_offset_Dyad, initvalu_offset_SL, initvalu_offset_Cyt, parameter, finavalu, JCaDyad, JCaSL, JCaCyt);

  //=====================================================================
  // COMPENSATION FOR NANs and INFs
  //=====================================================================

  // make sure function does not return NANs and INFs
  for (i = 0; i < EQUATIONS; i++) {
    if (isnan(finavalu[i])) {
      finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
    } else if (isinf(finavalu[i])) {
      finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
    }
  }
}
//=====================================================================
// MAIN FUNCTION
//=====================================================================

void master(fp timeinst, fp *initvalu, fp *parameter, fp *finavalu, int mode) {

  //=====================================================================
  // VARIABLES
  //=====================================================================

  // counters
  int i;

  // intermediate output on host
  fp JCaDyad;
  fp JCaSL;
  fp JCaCyt;

  // offset pointers
  int initvalu_offset_batch; //
  int initvalu_offset_ecc;   // 46 points
  int parameter_offset_ecc;
  int initvalu_offset_Dyad;  // 15 points
  int parameter_offset_Dyad;
  int initvalu_offset_SL;    // 15 points
  int parameter_offset_SL;
  int initvalu_offset_Cyt;   // 15 points
  int parameter_offset_Cyt;

  // module parameters
  fp CaDyad; // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaSL;   // from ECC model, *** Converting from [mM] to [uM] ***
  fp CaCyt;  // from ECC model, *** Converting from [mM] to [uM] ***

  // thread counters
  int th_id, nthreads;
  int th_count[4];
  int temp;

  //=====================================================================
  // KERNELS FOR 1 WORKLOAD - PARALLEL
  //=====================================================================

  nthreads = omp_get_max_threads();

  if (mode == 0) {

    // partition workload between threads: round-robin over the 4 pieces
    temp = 0;
    for (i = 0; i < 4; i++) {      // do for all 4 pieces of work
      if (temp >= nthreads) {      // limit according to number of threads
        temp = 0;
      }
      th_count[i] = temp;          // assign thread to piece of work
      temp = temp + 1;
    }

    // run pieces of work in parallel; th_count is indexed 0..3 and each
    // thread queries its own id inside the region
    #pragma omp parallel private(th_id)
    {
      th_id = omp_get_thread_num();

      if (th_id == th_count[0]) { // ecc function
        initvalu_offset_ecc = 0; // 46 points
        parameter_offset_ecc = 0;
        ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);
      }

      if (th_id == th_count[1]) { // cam function for Dyad
        initvalu_offset_Dyad = 46; // 15 points
        parameter_offset_Dyad = 1;
        CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);
      }

      if (th_id == th_count[2]) { // cam function for SL
        initvalu_offset_SL = 61; // 15 points
        parameter_offset_SL = 6;
        CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);
      }

      if (th_id == th_count[3]) { // cam function for Cyt
        initvalu_offset_Cyt = 76; // 15 points
        parameter_offset_Cyt = 11;
        CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
        JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
      }
    }
  }

  //=====================================================================
  // KERNELS FOR MANY WORKLOAD - SERIAL
  //=====================================================================

  else {

    // ecc function
    initvalu_offset_ecc = 0; // 46 points
    parameter_offset_ecc = 0;
    ecc(timeinst, initvalu, initvalu_offset_ecc, parameter, parameter_offset_ecc, finavalu);

    // cam function for Dyad
    initvalu_offset_Dyad = 46; // 15 points
    parameter_offset_Dyad = 1;
    CaDyad = initvalu[35] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaDyad = cam(timeinst, initvalu, initvalu_offset_Dyad, parameter, parameter_offset_Dyad, finavalu, CaDyad);

    // cam function for SL
    initvalu_offset_SL = 61; // 15 points
    parameter_offset_SL = 6;
    CaSL = initvalu[36] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaSL = cam(timeinst, initvalu, initvalu_offset_SL, parameter, parameter_offset_SL, finavalu, CaSL);

    // cam function for Cyt
    initvalu_offset_Cyt = 76; // 15 points
    parameter_offset_Cyt = 11;
    CaCyt = initvalu[37] * 1e3; // from ECC model, *** Converting from [mM] to [uM] ***
    JCaCyt = cam(timeinst, initvalu, initvalu_offset_Cyt, parameter, parameter_offset_Cyt, finavalu, CaCyt);
  }

  //=====================================================================
  // FINAL KERNEL
  //=====================================================================

  // final adjustments
  fin(initvalu, initvalu_offset_ecc, initvalu_offset_Dyad, initvalu_offset_SL, initvalu_offset_Cyt, parameter, finavalu, JCaDyad, JCaSL, JCaCyt);

  //=====================================================================
  // COMPENSATION FOR NANs and INFs
  //=====================================================================

  // make sure function does not return NANs and INFs
  for (i = 0; i < EQUATIONS; i++) {
    if (isnan(finavalu[i])) {
      finavalu[i] = 0.0001; // for NAN set rate of change to 0.0001
    } else if (isinf(finavalu[i])) {
      finavalu[i] = 0.0001; // for INF set rate of change to 0.0001
    }
  }
}
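Because the four kernels are independent, the hand-rolled thread-id bookkeeping above can also be expressed with OpenMP's sections construct, which assigns each block to some thread automatically. A hedged sketch under the same offsets (master_sections is an illustrative name, not part of the benchmark):

void master_sections(fp timeinst, fp *initvalu, fp *parameter, fp *finavalu) {
  fp JCaDyad, JCaSL, JCaCyt;

  #pragma omp parallel sections
  {
    #pragma omp section
    ecc(timeinst, initvalu, 0, parameter, 0, finavalu);              /* 46 points */
    #pragma omp section
    JCaDyad = cam(timeinst, initvalu, 46, parameter, 1, finavalu,
                  initvalu[35] * 1e3);                               /* Dyad */
    #pragma omp section
    JCaSL = cam(timeinst, initvalu, 61, parameter, 6, finavalu,
                initvalu[36] * 1e3);                                 /* SL */
    #pragma omp section
    JCaCyt = cam(timeinst, initvalu, 76, parameter, 11, finavalu,
                 initvalu[37] * 1e3);                                /* Cyt */
  }

  /* JCa* are written by distinct sections and are visible after the
     implicit barrier that ends the parallel region. */
  (void)JCaDyad; (void)JCaSL; (void)JCaCyt;
}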
hello_openmp.c
/****************************************************************************** * FILE: omp_hello.c * DESCRIPTION: * OpenMP Example - Hello World - C/C++ Version * In this simple example, the master thread forks a parallel region. * All threads in the team obtain their unique thread number and print it. * The master thread only prints the total number of threads. Two OpenMP * library routines are used to obtain the number of threads and each * thread's number. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main (int argc, char *argv[]) { int nthreads, tid; /* Fork a team of threads giving them their own copies of variables */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); /* Only master thread does this */ if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } /* All threads join master thread and disband */ }
/****************************************************************************** * FILE: omp_hello.c * DESCRIPTION: * OpenMP Example - Hello World - C/C++ Version * In this simple example, the master thread forks a parallel region. * All threads in the team obtain their unique thread number and print it. * The master thread only prints the total number of threads. Two OpenMP * library routines are used to obtain the number of threads and each * thread's number. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, tid; /* Fork a team of threads giving them their own copies of variables */ /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); /* Only master thread does this */ if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } /* All threads join master thread and disband */ }
/****************************************************************************** * FILE: omp_hello.c * DESCRIPTION: * OpenMP Example - Hello World - C/C++ Version * In this simple example, the master thread forks a parallel region. * All threads in the team obtain their unique thread number and print it. * The master thread only prints the total number of threads. Two OpenMP * library routines are used to obtain the number of threads and each * thread's number. * AUTHOR: Blaise Barney 5/99 * LAST REVISED: 04/06/05 ******************************************************************************/ #include <omp.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char *argv[]) { int nthreads, tid; /* Fork a team of threads giving them their own copies of variables */ #pragma omp parallel private(nthreads, tid) { /* Obtain thread number */ tid = omp_get_thread_num(); printf("Hello World from thread = %d\n", tid); /* Only master thread does this */ if (tid == 0) { nthreads = omp_get_num_threads(); printf("Number of threads = %d\n", nthreads); } } /* All threads join master thread and disband */ }
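The middle variant above strips the pragmas but keeps the omp_get_* calls, so it still needs the OpenMP runtime to link. Guarding on the standard _OPENMP macro lets one file build both with and without -fopenmp; a small self-contained sketch:

#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* serial fallbacks so the file also builds without OpenMP */
static int omp_get_thread_num(void)  { return 0; }
static int omp_get_num_threads(void) { return 1; }
#endif

int main(void) {
  #pragma omp parallel /* unknown pragmas are ignored by serial compilers */
  {
    int tid = omp_get_thread_num(); /* private: declared inside the region */
    printf("Hello World from thread = %d\n", tid);
    if (tid == 0)
      printf("Number of threads = %d\n", omp_get_num_threads());
  }
  return 0;
}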
5779.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop { /* E := A*B */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NJ; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
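The pragmas in this row nest a device construct (target teams distribute) inside a host parallel for, which is unusual: every host thread would launch its own target region. A more conventional host-only parallelization of the first product collapses the two outer loops instead. This fragment is meant to slot into kernel_3mm and reuses the file's own macros and loop variables:

  /* Host-only parallelization of E := A*B from kernel_3mm above.
     collapse(2) fuses the i and j loops into one parallel iteration
     space; the scalar sum avoids repeated stores to E[i][j]. */
  #pragma omp parallel for collapse(2) private(k)
  for (i = 0; i < _PB_NI; i++)
    for (j = 0; j < _PB_NJ; j++) {
      DATA_TYPE sum = 0;
      for (k = 0; k < _PB_NK; ++k)
        sum += A[i][k] * B[k][j];
      E[i][j] = sum;
    }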
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i * j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i * (j + 1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i * (j + 3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i * (j + 2)) / nk; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(F, NJ, NL, nj, nl), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl), DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j, k; #pragma scop { /* E := A*B */ for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ for (i = 0; i < _PB_NJ; i++) { for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ for (i = 0; i < _PB_NI; i++) { for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. 
*/ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i * j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i * (j + 1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i * (j + 3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i * (j + 2)) / nk; } /* * DCE code. Must scan the entire live-out data. Can be used also to check * the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* * Main computational kernel. The whole function will be timed, including the * call and return. */ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E, NI, NJ, ni, nj), DATA_TYPE POLYBENCH_2D(A, NI, NK, ni, nk), DATA_TYPE POLYBENCH_2D(B, NK, NJ, nk, nj), DATA_TYPE POLYBENCH_2D(F, NJ, NL, nj, nl), DATA_TYPE POLYBENCH_2D(C, NJ, NM, nj, nm), DATA_TYPE POLYBENCH_2D(D, NM, NL, nm, nl), DATA_TYPE POLYBENCH_2D(G, NI, NL, ni, nl)) { int i, j, k; #pragma scop { /* E := A*B */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } } /* F := C*D */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NJ; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } } /* G := E*F */ #pragma omp parallel for simd schedule(static, 1) for (i = 0; i < _PB_NI; i++) { #pragma omp target teams distribute simd thread_limit(128) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } } #pragma endscop } int main(int argc, char **argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm(ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* * Prevent dead-code elimination. All live-out data must be printed by * the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
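For actual offloading, the usual OpenMP 4.5 idiom is a single combined construct with explicit map clauses rather than the mixed host/device pragmas in the row above. A self-contained sketch of one matrix product (matmul_offload is an illustrative name; arrays are flattened to keep the map sections contiguous):

/* One combined construct distributes the (i, j) pairs over teams and
   threads on the device; map clauses copy the inputs in and the
   result out. */
void matmul_offload(int ni, int nj, int nk,
                    double *E, const double *A, const double *B) {
  #pragma omp target teams distribute parallel for collapse(2) \
          map(to: A[0:ni*nk], B[0:nk*nj]) map(from: E[0:ni*nj])
  for (int i = 0; i < ni; i++)
    for (int j = 0; j < nj; j++) {
      double sum = 0.0;
      for (int k = 0; k < nk; k++)
        sum += A[i*nk + k] * B[k*nj + j];
      E[i*nj + j] = sum;
    }
}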
p2p.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************* NAME: Pipeline PURPOSE: This program tests the efficiency with which point-to-point synchronization can be carried out. It does so by executing a pipelined algorithm on an m*n grid. The first array dimension is distributed among the threads (stripwise decomposition). USAGE: The program takes as input the number of threads, the dimensions of the grid, and the number of iterations on the grid <progname> <# threads> <iterations> <m> <n> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: - Written by Rob Van der Wijngaart, March 2006. 
- modified by Rob Van der Wijngaart, August 2006: * changed boundary conditions and stencil computation to avoid overflow * introduced multiple iterations over grid and dependency between iterations *******************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> /* define shorthand for indexing a multi-dimensional array */ #define ARRAY(i,j) vector[i+(j)*(m)] /* define shorthand for flag with cache line padding */ #define LINEWORDS 16 #define flag(TID,j) flag[((TID)+(j)*nthread)*LINEWORDS] int main(int argc, char ** argv) { int TID; /* Thread ID */ long m, n; /* grid dimensions */ int i, j, jj, iter, ID; /* dummies */ int iterations; /* number of times to run the pipeline algorithm */ int *flag; /* used for pairwise synchronizations */ int *start, *end; /* starts and ends of grid slices */ int segment_size; double pipeline_time, /* timing parameters */ avgtime; double epsilon = 1.e-8; /* error tolerance */ double corner_val; /* verification value at top right corner of grid */ int nthread_input, /* thread parameters */ nthread; int grp; /* grid line aggregation factor */ int jjsize; /* actual line group size */ double * RESTRICT vector;/* array holding grid values */ long total_length; /* total required length to store grid values */ int num_error=0; /* flag that signals that requested and obtained numbers of threads are the same */ int true, false; /* toggled booleans used for synchronization */ /******************************************************************************* ** process and test input parameters ********************************************************************************/ printf("Parallel Research Kernels version %s\n", PRKVERSION); printf("OpenMP pipeline execution on 2D grid\n"); if (argc != 5 && argc != 6){ printf("Usage: %s <# threads> <# iterations> <first array dimension> ", *argv); printf("<second array dimension> [group factor]\n"); return(EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1){ printf("ERROR: iterations must be >= 1 : %d \n",iterations); exit(EXIT_FAILURE); } m = atol(*++argv); n = atol(*++argv); if (m < 1 || n < 1){ printf("ERROR: grid dimensions must be positive: %ld, %ld \n", m, n); exit(EXIT_FAILURE); } if (argc==6) { grp = atoi(*++argv); if (grp < 1) grp = 1; else if (grp >= n) grp = n-1; } else grp = 1; total_length = sizeof(double)*m*n; vector = (double *) prk_malloc(total_length); if (!vector) { printf("ERROR: Could not allocate space for vector: %ld\n", total_length); exit(EXIT_FAILURE); } if (m<nthread_input) { printf("First grid dimension %ld smaller than number of threads requested: %d\n", m, nthread_input); exit(EXIT_FAILURE); } start = (int *) prk_malloc(2*nthread_input*sizeof(int)); if (!start) { printf("ERROR: Could not allocate space for array of slice boundaries\n"); exit(EXIT_FAILURE); } end = start + nthread_input; start[0] = 0; for (ID=0; ID<nthread_input; ID++) { segment_size = m/nthread_input; if (ID < (m%nthread_input)) segment_size++; if (ID>0) start[ID] = end[ID-1]+1; end[ID] = start[ID]+segment_size-1; } flag = (int *) prk_malloc(sizeof(int)*nthread_input*LINEWORDS*n); if (!flag) { printf("ERROR: COuld not allocate space for synchronization flags\n"); exit(EXIT_FAILURE); } 
#pragma omp parallel private(i, j, jj, jjsize, TID, iter, true, false) { #pragma omp master { nthread = omp_get_num_threads(); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Number of threads = %d\n",nthread_input); printf("Grid sizes = %ld, %ld\n", m, n); printf("Number of iterations = %d\n", iterations); if (grp > 1) printf("Group factor = %d (cheating!)\n", grp); #if SYNCHRONOUS printf("Neighbor thread handshake = on\n"); #else printf("Neighbor thread handshake = off\n"); #endif } } bail_out(num_error); TID = omp_get_thread_num(); /* clear the array, assuming first-touch memory placement */ for (j=0; j<n; j++) for (i=start[TID]; i<=end[TID]; i++) ARRAY(i,j) = 0.0; /* set boundary values (bottom and left side of grid */ if (TID==0) for (j=0; j<n; j++) ARRAY(start[TID],j) = (double) j; for (i=start[TID]; i<=end[TID]; i++) ARRAY(i,0) = (double) i; /* set flags to zero to indicate no data is available yet */ true = 1; false = !true; for (j=0; j<n; j++) flag(TID,j) = false; /* we need a barrier after setting the flags, to make sure each is visible to all threads, and to synchronize before the iterations start */ #pragma omp barrier for (iter = 0; iter<=iterations; iter++){ #if !SYNCHRONOUS /* true and false toggle each iteration */ true = (iter+1)%2; false = !true; #endif /* start timer after a warmup iteration */ if (iter == 1) { #pragma omp barrier #pragma omp master { pipeline_time = wtime(); } } if (TID==0) { /* first thread waits for corner value to be copied */ while (flag(0,0) == true) { #pragma omp flush } #if SYNCHRONOUS flag(0,0)= true; #pragma omp flush #endif } for (j=1; j<n; j+=grp) { /* apply grouping */ jjsize = MIN(grp, n-j); /* if not on left boundary, wait for left neighbor to produce data */ if (TID > 0) { while (flag(TID-1,j) == false) { #pragma omp flush } #if SYNCHRONOUS flag(TID-1,j)= false; #pragma omp flush #endif } for (jj=j; jj<j+jjsize; jj++) for (i=MAX(start[TID],1); i<= end[TID]; i++) { ARRAY(i,jj) = ARRAY(i-1,jj) + ARRAY(i,jj-1) - ARRAY(i-1,jj-1); } /* if not on right boundary, signal right neighbor it has new data */ if (TID < nthread-1) { #if SYNCHRONOUS while (flag(TID,j) == true) { #pragma omp flush } #endif flag(TID,j) = true; #pragma omp flush } } if (TID==nthread-1) { /* if on right boundary, copy top right corner value to bottom left corner to create dependency and signal completion */ ARRAY(0,0) = -ARRAY(m-1,n-1); #if SYNCHRONOUS while (flag(0,0) == false) { #pragma omp flush } flag(0,0) = false; #else #pragma omp flush flag(0,0) = true; #endif #pragma omp flush } } /* end of iterations */ #pragma omp barrier #pragma omp master { pipeline_time = wtime() - pipeline_time; } } /* end of OPENMP parallel region */ /******************************************************************************* ** Analyze and output results. 
********************************************************************************/ /* verify correctness, using top right value; */ corner_val = (double)((iterations+1)*(n+m-2)); if (fabs(ARRAY(m-1,n-1)-corner_val)/corner_val > epsilon) { printf("ERROR: checksum %lf does not match verification value %lf\n", ARRAY(m-1,n-1), corner_val); exit(EXIT_FAILURE); } #if VERBOSE printf("Solution validates; verification value = %lf\n", corner_val); printf("Point-to-point synchronizations/s: %lf\n", ((float)((n-1)*(nthread-1)))/(avgtime)); #else printf("Solution validates\n"); #endif avgtime = pipeline_time/iterations; /* flip the sign of the execution time to indicate cheating */ if (grp>1) avgtime *= -1.0; printf("Rate (MFlops/s): %lf Avg time (s): %lf\n", 1.0E-06 * 2 * ((double)((m-1)*(n-1)))/avgtime, avgtime); exit(EXIT_SUCCESS); }
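The pipeline above synchronizes neighbors with plain flag reads and writes plus omp flush, which predates OpenMP's acquire/release atomics. Since OpenMP 5.0 the same pairwise signal can be written without bare racing loads; a minimal stand-alone sketch of one producer/consumer pair (assumes an OpenMP 5.0 compiler for the acquire/release clauses):

#include <omp.h>
#include <stdio.h>

int main(void) {
  int flag = 0, data = 0;
  #pragma omp parallel num_threads(2)
  {
    if (omp_get_thread_num() == 0) { /* producer, like the left neighbor */
      data = 42;
      #pragma omp atomic write release
      flag = 1;                      /* publish: data is visible before flag */
    } else {                         /* consumer, like the right neighbor */
      int seen = 0;
      while (!seen) {
        #pragma omp atomic read acquire
        seen = flag;                 /* spin until the producer signals */
      }
      printf("received %d\n", data);
    }
  }
  return 0;
}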
/******************************************************************* NAME: Pipeline PURPOSE: This program tests the efficiency with which point-to-point synchronization can be carried out. It does so by executing a pipelined algorithm on an m*n grid. The first array dimension is distributed among the threads (stripwise decomposition). USAGE: The program takes as input the number of threads, the dimensions of the grid, and the number of iterations on the grid <progname> <# threads> <iterations> <m> <n> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: - Written by Rob Van der Wijngaart, March 2006. - modified by Rob Van der Wijngaart, August 2006: * changed boundary conditions and stencil computation to avoid overflow * introduced multiple iterations over grid and dependency between iterations *******************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> /* define shorthand for indexing a multi-dimensional array */ #define ARRAY(i,j) vector[i+(j)*(m)] /* define shorthand for flag with cache line padding */ #define LINEWORDS 16 #define flag(TID,j) flag[((TID)+(j)*nthread)*LINEWORDS] int main(int argc, char **argv) { int TID; /* Thread ID */ long m, n; /* grid dimensions */ int i, j, jj, iter, ID; /* dummies */ int iterations; /* number of times to run the pipeline * algorithm */ int *flag; /* used for pairwise synchronizations */ int *start, *end; /* starts and ends of grid slices */ int segment_size; double pipeline_time, /* timing parameters */ avgtime; double epsilon = 1.e-8; /* error tolerance */ double corner_val; /* verification value at top right corner of * grid */ int nthread_input, /* thread parameters */ nthread; int grp; /* grid line aggregation factor */ int jjsize; /* actual line group size */ double *RESTRICT vector; /* array holding grid values */ long total_length; /* total required length to store grid values */ int num_error = 0; /* flag that signals that requested and * obtained numbers of threads are the same */ int true, false; /* toggled booleans used for synchronization */ /******************************************************************************* ** process and test input parameters ********************************************************************************/ printf("Parallel Research Kernels version %s\n", PRKVERSION); printf("OpenMP pipeline execution on 2D grid\n"); if (argc != 5 && argc != 6) { printf("Usage: %s <# threads> <# iterations> <first array dimension> ", *argv); printf("<second array dimension> [group factor]\n"); return (EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1) { printf("ERROR: iterations must be >= 1 : %d \n", iterations); exit(EXIT_FAILURE); } m = atol(*++argv); n = atol(*++argv); if (m < 1 || n < 1) { printf("ERROR: grid dimensions must be positive: %ld, %ld \n", m, n); exit(EXIT_FAILURE); } if (argc == 6) { grp = atoi(*++argv); if (grp < 1) grp = 1; else if (grp >= n) grp = n - 1; } else grp = 1; total_length = sizeof(double) * m * n; vector = (double *)prk_malloc(total_length); if (!vector) { 
printf("ERROR: Could not allocate space for vector: %ld\n", total_length); exit(EXIT_FAILURE); } if (m < nthread_input) { printf("First grid dimension %ld smaller than number of threads requested: %d\n", m, nthread_input); exit(EXIT_FAILURE); } start = (int *)prk_malloc(2 * nthread_input * sizeof(int)); if (!start) { printf("ERROR: Could not allocate space for array of slice boundaries\n"); exit(EXIT_FAILURE); } end = start + nthread_input; start[0] = 0; for (ID = 0; ID < nthread_input; ID++) { segment_size = m / nthread_input; if (ID < (m % nthread_input)) segment_size++; if (ID > 0) start[ID] = end[ID - 1] + 1; end[ID] = start[ID] + segment_size - 1; } flag = (int *)prk_malloc(sizeof(int) * nthread_input * LINEWORDS * n); if (!flag) { printf("ERROR: COuld not allocate space for synchronization flags\n"); exit(EXIT_FAILURE); } #pragma omp master { nthread = omp_get_num_threads(); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Number of threads = %d\n", nthread_input); printf("Grid sizes = %ld, %ld\n", m, n); printf("Number of iterations = %d\n", iterations); if (grp > 1) printf("Group factor = %d (cheating!)\n", grp); #if SYNCHRONOUS printf("Neighbor thread handshake = on\n"); #else printf("Neighbor thread handshake = off\n"); #endif } } bail_out(num_error); TID = omp_get_thread_num(); /* clear the array, assuming first-touch memory placement */ for (j = 0; j < n; j++) for (i = start[TID]; i <= end[TID]; i++) ARRAY(i, j) = 0.0; /* set boundary values (bottom and left side of grid */ if (TID == 0) for (j = 0; j < n; j++) ARRAY(start[TID], j) = (double)j; for (i = start[TID]; i <= end[TID]; i++) ARRAY(i, 0) = (double)i; /* set flags to zero to indicate no data is available yet */ true = 1; false = !true; for (j = 0; j < n; j++) flag(TID, j) = false; /* * we need a barrier after setting the flags, to make sure each is * visible to all threads, and to synchronize before the iterations start */ for (iter = 0; iter <= iterations; iter++) { #if !SYNCHRONOUS /* true and false toggle each iteration */ true = (iter + 1) % 2; false = !true; #endif /* start timer after a warmup iteration */ if (iter == 1) { pipeline_time = wtime(); } if (TID == 0) { /* first thread waits for corner value to be * copied */ while (flag(0, 0) == true) { } #if SYNCHRONOUS flag(0, 0) = true; #endif } for (j = 1; j < n; j += grp) { /* apply grouping */ jjsize = MIN(grp, n - j); /* * if not on left boundary, wait for left neighbor to produce * data */ if (TID > 0) { while (flag(TID - 1, j) == false) { } #if SYNCHRONOUS flag(TID - 1, j) = false; #endif } for (jj = j; jj < j + jjsize; jj++) for (i = MAX(start[TID], 1); i <= end[TID]; i++) { ARRAY(i, jj) = ARRAY(i - 1, jj) + ARRAY(i, jj - 1) - ARRAY(i - 1, jj - 1); } /* * if not on right boundary, signal right neighbor it has new * data */ if (TID < nthread - 1) { #if SYNCHRONOUS while (flag(TID, j) == true) { } #endif flag(TID, j) = true; } } if (TID == nthread - 1) { /* if on right boundary, copy top right * corner value to bottom left corner to * create dependency and signal completion */ ARRAY(0, 0) = -ARRAY(m - 1, n - 1); #if SYNCHRONOUS while (flag(0, 0) == false) { } flag(0, 0) = false; #else flag(0, 0) = true; #endif } } /* end of iterations */ pipeline_time = wtime() - pipeline_time; /* end of OPENMP parallel region */ /******************************************************************************* ** Analyze and output 
results. ********************************************************************************/ /* verify correctness, using top right value; */ corner_val = (double)((iterations + 1) * (n + m - 2)); if (fabs(ARRAY(m - 1, n - 1) - corner_val) / corner_val > epsilon) { printf("ERROR: checksum %lf does not match verification value %lf\n", ARRAY(m - 1, n - 1), corner_val); exit(EXIT_FAILURE); } #if VERBOSE printf("Solution validates; verification value = %lf\n", corner_val); printf("Point-to-point synchronizations/s: %lf\n", ((float)((n - 1) * (nthread - 1))) / (avgtime)); #else printf("Solution validates\n"); #endif avgtime = pipeline_time / iterations; /* flip the sign of the execution time to indicate cheating */ if (grp > 1) avgtime *= -1.0; printf("Rate (MFlops/s): %lf Avg time (s): %lf\n", 1.0E-06 * 2 * ((double)((m - 1) * (n - 1))) / avgtime, avgtime); exit(EXIT_SUCCESS); }
/******************************************************************* NAME: Pipeline PURPOSE: This program tests the efficiency with which point-to-point synchronization can be carried out. It does so by executing a pipelined algorithm on an m*n grid. The first array dimension is distributed among the threads (stripwise decomposition). USAGE: The program takes as input the number of threads, the dimensions of the grid, and the number of iterations on the grid <progname> <# threads> <iterations> <m> <n> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: - Written by Rob Van der Wijngaart, March 2006. - modified by Rob Van der Wijngaart, August 2006: * changed boundary conditions and stencil computation to avoid overflow * introduced multiple iterations over grid and dependency between iterations *******************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> /* define shorthand for indexing a multi-dimensional array */ #define ARRAY(i,j) vector[i+(j)*(m)] /* define shorthand for flag with cache line padding */ #define LINEWORDS 16 #define flag(TID,j) flag[((TID)+(j)*nthread)*LINEWORDS] int main(int argc, char **argv) { int TID; /* Thread ID */ long m, n; /* grid dimensions */ int i, j, jj, iter, ID; /* dummies */ int iterations; /* number of times to run the pipeline * algorithm */ int *flag; /* used for pairwise synchronizations */ int *start, *end; /* starts and ends of grid slices */ int segment_size; double pipeline_time, /* timing parameters */ avgtime; double epsilon = 1.e-8; /* error tolerance */ double corner_val; /* verification value at top right corner of * grid */ int nthread_input, /* thread parameters */ nthread; int grp; /* grid line aggregation factor */ int jjsize; /* actual line group size */ double *RESTRICT vector; /* array holding grid values */ long total_length; /* total required length to store grid values */ int num_error = 0; /* flag that signals that requested and * obtained numbers of threads are the same */ int true, false; /* toggled booleans used for synchronization */ /******************************************************************************* ** process and test input parameters ********************************************************************************/ printf("Parallel Research Kernels version %s\n", PRKVERSION); printf("OpenMP pipeline execution on 2D grid\n"); if (argc != 5 && argc != 6) { printf("Usage: %s <# threads> <# iterations> <first array dimension> ", *argv); printf("<second array dimension> [group factor]\n"); return (EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1) { printf("ERROR: iterations must be >= 1 : %d \n", iterations); exit(EXIT_FAILURE); } m = atol(*++argv); n = atol(*++argv); if (m < 1 || n < 1) { printf("ERROR: grid dimensions must be positive: %ld, %ld \n", m, n); exit(EXIT_FAILURE); } if (argc == 6) { grp = atoi(*++argv); if (grp < 1) grp = 1; else if (grp >= n) grp = n - 1; } else grp = 1; total_length = sizeof(double) * m * n; vector = (double *)prk_malloc(total_length); if (!vector) { 
printf("ERROR: Could not allocate space for vector: %ld\n", total_length); exit(EXIT_FAILURE); } if (m < nthread_input) { printf("First grid dimension %ld smaller than number of threads requested: %d\n", m, nthread_input); exit(EXIT_FAILURE); } start = (int *)prk_malloc(2 * nthread_input * sizeof(int)); if (!start) { printf("ERROR: Could not allocate space for array of slice boundaries\n"); exit(EXIT_FAILURE); } end = start + nthread_input; start[0] = 0; for (ID = 0; ID < nthread_input; ID++) { segment_size = m / nthread_input; if (ID < (m % nthread_input)) segment_size++; if (ID > 0) start[ID] = end[ID - 1] + 1; end[ID] = start[ID] + segment_size - 1; } flag = (int *)prk_malloc(sizeof(int) * nthread_input * LINEWORDS * n); if (!flag) { printf("ERROR: COuld not allocate space for synchronization flags\n"); exit(EXIT_FAILURE); } #pragma omp parallel private(i, j, jj, jjsize, TID, iter, true, false) { #pragma omp master { nthread = omp_get_num_threads(); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Number of threads = %d\n", nthread_input); printf("Grid sizes = %ld, %ld\n", m, n); printf("Number of iterations = %d\n", iterations); if (grp > 1) printf("Group factor = %d (cheating!)\n", grp); #if SYNCHRONOUS printf("Neighbor thread handshake = on\n"); #else printf("Neighbor thread handshake = off\n"); #endif } } bail_out(num_error); TID = omp_get_thread_num(); /* clear the array, assuming first-touch memory placement */ for (j = 0; j < n; j++) for (i = start[TID]; i <= end[TID]; i++) ARRAY(i, j) = 0.0; /* set boundary values (bottom and left side of grid */ if (TID == 0) for (j = 0; j < n; j++) ARRAY(start[TID], j) = (double)j; for (i = start[TID]; i <= end[TID]; i++) ARRAY(i, 0) = (double)i; /* set flags to zero to indicate no data is available yet */ true = 1; false = !true; for (j = 0; j < n; j++) flag(TID, j) = false; /* * we need a barrier after setting the flags, to make sure each is * visible to all threads, and to synchronize before the iterations * start */ #pragma omp barrier for (iter = 0; iter <= iterations; iter++) { #if !SYNCHRONOUS /* true and false toggle each iteration */ true = (iter + 1) % 2; false = !true; #endif /* start timer after a warmup iteration */ if (iter == 1) { #pragma omp barrier #pragma omp master { pipeline_time = wtime(); } } if (TID == 0) { /* first thread waits for corner value to be * copied */ while (flag(0, 0) == true) { #pragma omp flush } #if SYNCHRONOUS flag(0, 0) = true; #pragma omp flush #endif } for (j = 1; j < n; j += grp) { /* apply grouping */ jjsize = MIN(grp, n - j); /* * if not on left boundary, wait for left neighbor to * produce data */ if (TID > 0) { while (flag(TID - 1, j) == false) { #pragma omp flush } #if SYNCHRONOUS flag(TID - 1, j) = false; #pragma omp flush #endif } for (jj = j; jj < j + jjsize; jj++) for (i = MAX(start[TID], 1); i <= end[TID]; i++) { ARRAY(i, jj) = ARRAY(i - 1, jj) + ARRAY(i, jj - 1) - ARRAY(i - 1, jj - 1); } /* * if not on right boundary, signal right neighbor it has new * data */ if (TID < nthread - 1) { #if SYNCHRONOUS while (flag(TID, j) == true) { #pragma omp flush } #endif flag(TID, j) = true; #pragma omp flush } } if (TID == nthread - 1) { /* if on right boundary, copy top right * corner value to bottom left corner to * create dependency and signal completion */ ARRAY(0, 0) = -ARRAY(m - 1, n - 1); #if SYNCHRONOUS while (flag(0, 0) == false) { #pragma omp flush 
} flag(0, 0) = false; #else #pragma omp flush flag(0, 0) = true; #endif #pragma omp flush } } /* end of iterations */ #pragma omp barrier #pragma omp master { pipeline_time = wtime() - pipeline_time; } } /* end of OPENMP parallel region */ /******************************************************************************* ** Analyze and output results. ********************************************************************************/ /* verify correctness, using top right value; */ corner_val = (double)((iterations + 1) * (n + m - 2)); if (fabs(ARRAY(m - 1, n - 1) - corner_val) / corner_val > epsilon) { printf("ERROR: checksum %lf does not match verification value %lf\n", ARRAY(m - 1, n - 1), corner_val); exit(EXIT_FAILURE); } #if VERBOSE printf("Solution validates; verification value = %lf\n", corner_val); printf("Point-to-point synchronizations/s: %lf\n", ((float)((n - 1) * (nthread - 1))) / (avgtime)); #else printf("Solution validates\n"); #endif avgtime = pipeline_time / iterations; /* flip the sign of the execution time to indicate cheating */ if (grp > 1) avgtime *= -1.0; printf("Rate (MFlops/s): %lf Avg time (s): %lf\n", 1.0E-06 * 2 * ((double)((m - 1) * (n - 1))) / avgtime, avgtime); exit(EXIT_SUCCESS); }
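The omp_formatted Pipeline variant above synchronizes neighbor threads with plain int flags plus #pragma omp flush. The following is a minimal, self-contained sketch of that flag-plus-flush handshake between one producer and one consumer (a reduced illustration, not code from the benchmark; the variable names are invented):

#include <stdio.h>
#include <omp.h>

int main(void)
{
    int data = 0, flag = 0;
#pragma omp parallel num_threads(2) shared(data, flag)
    {
        if (omp_get_thread_num() == 0) {
            /* producer: publish the payload, then raise the flag */
            data = 42;
#pragma omp flush
            flag = 1;
#pragma omp flush
        } else {
            /* consumer: spin until the flag becomes visible, then read data */
            int ready = 0;
            while (!ready) {
#pragma omp flush
                ready = flag;
            }
#pragma omp flush
            printf("consumer read data = %d\n", data);
        }
    }
    return 0;
}

The two flushes on the producer side order the data write before the flag write, exactly as the benchmark orders the grid update before flag(TID, j) = true; the flush inside the consumer's spin loop forces the flag to be re-read from memory.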
matrix_stat.h
#ifndef MATRIX_STAT_H_ #define MATRIX_STAT_H_ #include <vector> #include <algorithm> #include <cmath> namespace acspo { template <typename T> double sum(const matrix<T> &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } return ret; } template <typename T> double mean(const matrix<T> &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template <typename T> double var(const matrix<T> &mat, double avg) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += (mat(i)-avg)*(mat(i)-avg); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template <typename T> double var(const matrix<T> &mat) { return var(mat, mean(mat)); } template <typename T> double std_dev(const matrix<T> &mat, double avg) { return std::sqrt(var(mat, avg)); } template <typename T> double std_dev(const matrix<T> &mat) { return std::sqrt(var(mat)); } template <typename T> double med(const matrix<T> &mat) { unsigned int elem = mat.elem(); std::vector<double> buf; buf.reserve(elem); for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { buf.push_back(mat(i)); } } if (buf.size() == 0) { return NAN; } std::sort(buf.begin(), buf.end()); if (buf.size() % 2 == 1) { return buf[(buf.size()-1)/2]; } else { return 0.5*(buf[buf.size()/2-1]+buf[buf.size()/2]); } } } #endif
#ifndef MATRIX_STAT_H_ #define MATRIX_STAT_H_ #include <vector> #include <algorithm> #include <cmath> namespace acspo { template < typename T > double sum(const matrix < T > &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } return ret; } template < typename T > double mean(const matrix < T > &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template < typename T > double var(const matrix < T > &mat, double avg) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += (mat(i) - avg) * (mat(i) - avg); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template < typename T > double var(const matrix < T > &mat) { return var(mat, mean(mat)); } template < typename T > double std_dev(const matrix < T > &mat, double avg) { return std::sqrt(var(mat, avg)); } template < typename T > double std_dev(const matrix < T > &mat) { return std::sqrt(var(mat)); } template < typename T > double med(const matrix < T > &mat) { unsigned int elem = mat.elem(); std::vector < double > buf; buf.reserve(elem); for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { buf.push_back(mat(i)); } } if (buf.size() == 0) { return NAN; } std::sort(buf.begin(), buf.end()); if (buf.size() % 2 == 1) { return buf[(buf.size() - 1) / 2]; } else { return 0.5 * (buf[buf.size() / 2 - 1] + buf[buf.size() / 2]); } } } #endif
#ifndef MATRIX_STAT_H_ #define MATRIX_STAT_H_ #include <vector> #include <algorithm> #include <cmath> namespace acspo { template < typename T > double sum(const matrix < T > &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } return ret; } template < typename T > double mean(const matrix < T > &mat) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += mat(i); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template < typename T > double var(const matrix < T > &mat, double avg) { unsigned int elem = mat.elem(); unsigned int count = 0; double ret = 0; #pragma omp parallel for reduction(+:ret,count) for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { ret += (mat(i) - avg) * (mat(i) - avg); count++; } } if (count == 0) { return NAN; } ret /= count; return ret; } template < typename T > double var(const matrix < T > &mat) { return var(mat, mean(mat)); } template < typename T > double std_dev(const matrix < T > &mat, double avg) { return std::sqrt(var(mat, avg)); } template < typename T > double std_dev(const matrix < T > &mat) { return std::sqrt(var(mat)); } template < typename T > double med(const matrix < T > &mat) { unsigned int elem = mat.elem(); std::vector < double > buf; buf.reserve(elem); for (unsigned int i = 0; i < elem; i++) { if (!std::isnan(mat(i))) { buf.push_back(mat(i)); } } if (buf.size() == 0) { return NAN; } std::sort(buf.begin(), buf.end()); if (buf.size() % 2 == 1) { return buf[(buf.size() - 1) / 2]; } else { return 0.5 * (buf[buf.size() / 2 - 1] + buf[buf.size() / 2]); } } } #endif
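The matrix_stat.h kernels above accumulate a running sum and a NaN-aware element count in a single pass using one parallel for with a two-variable reduction. A minimal self-contained C sketch of the same idiom (the array and its length here are invented for illustration):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double a[] = { 1.0, NAN, 2.5, 4.0 };
    const int n = 4;
    double sum = 0.0;
    unsigned int count = 0;
    /* both accumulators are combined across threads at the end of the loop */
#pragma omp parallel for reduction(+:sum,count)
    for (int i = 0; i < n; i++) {
        if (!isnan(a[i])) {   /* skip missing values, as the header does */
            sum += a[i];
            count++;
        }
    }
    printf("mean = %f over %u values\n", count ? sum / count : NAN, count);
    return 0;
}

Listing both variables in one reduction clause avoids a second pass (or a critical section) for the count, which is why the header can compute NaN-tolerant means in a single loop.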
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ])
+ coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; LIKWID_MARKER_THREADINIT; LIKWID_MARKER_START("calc"); #endif int num_threads = 1; for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) % 2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i +
2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON LIKWID_MARKER_STOP("calc"); LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
/* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* * Subtract the `struct timeval' values X and Y, storing the result in * RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* * Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1]) + 8; Ny = atoi(argv[2]) + 8; Nz = atoi(argv[3]) + 8; } if (argc > 4) Nt = atoi(argv[4]); //allocate the arrays double ****A = (double ****)malloc(sizeof(double ***) * 2); for (m = 0; m < 2; m++) { A[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { A[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { A[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } double ****coef = (double ****)malloc(sizeof(double ***) * 13); for (m = 0; m < 13; m++) { coef[m] = (double ***)malloc(sizeof(double **) * Nz); for (i = 0; i < Nz; i++) { coef[m][i] = (double **)malloc(sizeof(double *) * Ny); for (j = 0; j < Ny; j++) { coef[m][i][j] = (double *)malloc(sizeof(double) * Nx); } } } //tile size information, including extra element to decide the list length int *tile_size = (int *)malloc(sizeof(int)); tile_size[0] = -1; //The list is modified here before source-to-source transformations tile_size = (int *)realloc((void *)tile_size, sizeof(int) * 5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; //for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff = 1.e100; const int BASE = 1024; //initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m = 0; m < 13; m++) { for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for (test = 0; test < TESTS; test++) { gettimeofday(&start, 0); //serial execution - Addition: 6 && Multiplication:2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz - 4; i++) { for (j = 4; j < Ny - 4; j++) { for (k = 4; k < Nx - 4; k++) { A[(t + 1) % 2][i][j][k] = coef[0][i][j][k] * A[(t) % 2][i][j][k] + coef[1][i][j][k] * (A[(t) % 2][i - 1][j][k] + A[(t) % 2][i + 1][j][k]) + coef[2][i][j][k] * (A[(t) % 2][i][j - 1][k] + A[(t) % 2][i][j + 1][k]) + coef[3][i][j][k] * (A[(t) %
2][i][j][k - 1] + A[(t) % 2][i][j][k + 1]) + coef[4][i][j][k] * (A[(t) % 2][i - 2][j][k] + A[(t) % 2][i + 2][j][k]) + coef[5][i][j][k] * (A[(t) % 2][i][j - 2][k] + A[(t) % 2][i][j + 2][k]) + coef[6][i][j][k] * (A[(t) % 2][i][j][k - 2] + A[(t) % 2][i][j][k + 2]) + coef[7][i][j][k] * (A[(t) % 2][i - 3][j][k] + A[(t) % 2][i + 3][j][k]) + coef[8][i][j][k] * (A[(t) % 2][i][j - 3][k] + A[(t) % 2][i][j + 3][k]) + coef[9][i][j][k] * (A[(t) % 2][i][j][k - 3] + A[(t) % 2][i][j][k + 3]) + coef[10][i][j][k] * (A[(t) % 2][i - 4][j][k] + A[(t) % 2][i + 4][j][k]) + coef[11][i][j][k] * (A[(t) % 2][i][j - 4][k] + A[(t) % 2][i][j + 4][k]) + coef[12][i][j][k] * (A[(t) % 2][i][j][k - 4] + A[(t) % 2][i][j][k + 4]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif //Free allocated arrays for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for (m = 0; m < 13; m++) { for (i = 0; i < Nz; i++) { for (j = 0; j < Ny; j++) { free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
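In the 3d25pt_var row above, the omp_formatted variant adds OpenMP only around the LIKWID markers; the stencil inside the #pragma scop region is left serial, presumably for a source-to-source tool (the comments mention PLUTO) to transform. For illustration only, here is a reduced, self-contained sketch of the direct alternative: threading the outer space loop of a double-buffered stencil with parallel for. A 7-point stencil and fixed sizes are used purely to keep the example short; this is not what the dataset variant does.

#include <stdio.h>

#define N 64
#define NT 4

int main(void)
{
    static double A[2][N][N][N];           /* two time buffers, as above */
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            for (int k = 0; k < N; k++)
                A[0][i][j][k] = (double)(i + j + k);

    for (int t = 0; t < NT; t++) {
        /* within one time step, every i-plane of the write buffer is
         * independent, so the outer space loop can be split across threads */
#pragma omp parallel for schedule(static)
        for (int i = 1; i < N - 1; i++)
            for (int j = 1; j < N - 1; j++)
                for (int k = 1; k < N - 1; k++)
                    A[(t + 1) % 2][i][j][k] =
                        0.4 * A[t % 2][i][j][k]
                      + 0.1 * (A[t % 2][i - 1][j][k] + A[t % 2][i + 1][j][k]
                             + A[t % 2][i][j - 1][k] + A[t % 2][i][j + 1][k]
                             + A[t % 2][i][j][k - 1] + A[t % 2][i][j][k + 1]);
        /* the implicit barrier at the end of the parallel for keeps
         * successive time steps ordered */
    }
    printf("A[%d][32][32][32] = %f\n", NT % 2, A[NT % 2][32][32][32]);
    return 0;
}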
cover-test.c
/* * Copyright © 2015 RISC OS Open Ltd * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that * copyright notice and this permission notice appear in supporting * documentation, and that the name of the copyright holders not be used in * advertising or publicity pertaining to distribution of the software without * specific, written prior permission. The copyright holders make no * representations about the suitability of this software for any purpose. It * is provided "as is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. * * Author: Ben Avison (bavison@riscosopen.org) * */ /* * This test aims to verify both numerical correctness and the honouring of * array bounds for scaled plots (both nearest-neighbour and bilinear) at or * close to the boundary conditions for applicability of "cover" type fast paths * and iter fetch routines. * * It has a secondary purpose: by setting the env var EXACT (to any value) it * will only test plots that are exactly on the boundary condition. This makes * it possible to ensure that "cover" routines are being used to the maximum, * although this requires the use of a debugger or code instrumentation to * verify. */ #include "utils.h" #include <stdlib.h> #include <stdio.h> /* Approximate limits for random scale factor generation - these ensure we can * get at least 8x reduction and 8x enlargement. */ #define LOG2_MAX_FACTOR (3) /* 1/sqrt(2) (or sqrt(0.5), or 2^-0.5) as a 0.32 fixed-point number */ #define INV_SQRT_2_0POINT32_FIXED (0xB504F334u) /* The largest increment that can be generated by random_scale_factor(). * This occurs when the "mantissa" part is 0xFFFFFFFF and the "exponent" * part is -LOG2_MAX_FACTOR. */ #define MAX_INC ((pixman_fixed_t) \ (INV_SQRT_2_0POINT32_FIXED >> (31 - 16 - LOG2_MAX_FACTOR))) /* Minimum source width (in pixels) based on a typical page size of 4K and * maximum colour depth of 32bpp. */ #define MIN_SRC_WIDTH (4096 / 4) /* Derive the destination width so that at max increment we fit within source */ #define DST_WIDTH (MIN_SRC_WIDTH * pixman_fixed_1 / MAX_INC) /* Calculate heights the other way round. * No limits due to page alignment here. */ #define DST_HEIGHT 3 #define SRC_HEIGHT ((DST_HEIGHT * MAX_INC + pixman_fixed_1 - 1) / pixman_fixed_1) /* At the time of writing, all the scaled fast paths use SRC, OVER or ADD * Porter-Duff operators. XOR is included in the list to ensure good * representation of iter scanline fetch routines. */ static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_XOR, }; /* At the time of writing, all the scaled fast paths use a8r8g8b8, x8r8g8b8 * or r5g6b5, or red-blue swapped versions of the same. When a mask channel is * used, it is always a8 (and so implicitly not component alpha). a1r5g5b5 is * included because it is the only other format to feature in any iters. 
*/ static const pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_r5g6b5, PIXMAN_a1r5g5b5 }; /* This is a flag reflecting the environment variable EXACT. It can be used * to ensure that source coordinates corresponding exactly to the "cover" limits * are used, rather than any "near misses". This can, for example, be used in * conjunction with a debugger to ensure that only COVER fast paths are used. */ static int exact; static pixman_image_t * create_src_image (pixman_format_code_t fmt) { pixman_image_t *tmp_img, *img; /* We need the left-most and right-most MIN_SRC_WIDTH pixels to have * predictable values, even though fence_image_create_bits() may allocate * an image somewhat larger than that, by an amount that varies depending * upon the page size on the current platform. The solution is to create a * temporary non-fenced image that is exactly MIN_SRC_WIDTH wide and blit it * into the fenced image. */ tmp_img = pixman_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT, NULL, 0); if (tmp_img == NULL) return NULL; img = fence_image_create_bits (fmt, MIN_SRC_WIDTH, SRC_HEIGHT, TRUE); if (img == NULL) { pixman_image_unref (tmp_img); return NULL; } prng_randmemset (tmp_img->bits.bits, tmp_img->bits.rowstride * SRC_HEIGHT * sizeof (uint32_t), 0); image_endian_swap (tmp_img); pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, 0, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_composite (PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, img->bits.width - MIN_SRC_WIDTH, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_unref (tmp_img); return img; } static pixman_fixed_t random_scale_factor(void) { /* Get a random number with top bit set. */ uint32_t f = prng_rand () | 0x80000000u; /* In log(2) space, this is still approximately evenly spread between 31 * and 32. Divide by sqrt(2) to centre the distribution on 2^31. */ f = ((uint64_t) f * INV_SQRT_2_0POINT32_FIXED) >> 32; /* Now shift right (ie divide by an integer power of 2) to spread the * distribution between centres at 2^(16 +/- LOG2_MAX_FACTOR). */ f >>= 31 - 16 + prng_rand_n (2 * LOG2_MAX_FACTOR + 1) - LOG2_MAX_FACTOR; return f; } static pixman_fixed_t calc_translate (int dst_size, int src_size, pixman_fixed_t scale, pixman_bool_t low_align, pixman_bool_t bilinear) { pixman_fixed_t ref_src, ref_dst, scaled_dst; if (low_align) { ref_src = bilinear ? pixman_fixed_1 / 2 : pixman_fixed_e; ref_dst = pixman_fixed_1 / 2; } else { ref_src = pixman_int_to_fixed (src_size) - bilinear * pixman_fixed_1 / 2; ref_dst = pixman_int_to_fixed (dst_size) - pixman_fixed_1 / 2; } scaled_dst = ((uint64_t) ref_dst * scale + pixman_fixed_1 / 2) / pixman_fixed_1; /* We need the translation to be set such that when ref_dst is fed through * the transformation matrix, we get ref_src as the result. */ return ref_src - scaled_dst; } static pixman_fixed_t random_offset (void) { pixman_fixed_t offset = 0; /* Ensure we test the exact case quite a lot */ if (prng_rand_n (2)) return offset; /* What happens when we are close to the edge of the first * interpolation step? 
*/ if (prng_rand_n (2)) offset += (pixman_fixed_1 >> BILINEAR_INTERPOLATION_BITS) - 16; /* Try fine-grained variations */ offset += prng_rand_n (32); /* Test in both directions */ if (prng_rand_n (2)) offset = -offset; return offset; } static void check_transform (pixman_image_t *dst_img, pixman_image_t *src_img, pixman_transform_t *transform, pixman_bool_t bilinear) { pixman_vector_t v1, v2; v1.vector[0] = pixman_fixed_1 / 2; v1.vector[1] = pixman_fixed_1 / 2; v1.vector[2] = pixman_fixed_1; assert (pixman_transform_point (transform, &v1)); v2.vector[0] = pixman_int_to_fixed (dst_img->bits.width) - pixman_fixed_1 / 2; v2.vector[1] = pixman_int_to_fixed (dst_img->bits.height) - pixman_fixed_1 / 2; v2.vector[2] = pixman_fixed_1; assert (pixman_transform_point (transform, &v2)); if (bilinear) { assert (v1.vector[0] >= pixman_fixed_1 / 2); assert (v1.vector[1] >= pixman_fixed_1 / 2); assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width) - pixman_fixed_1 / 2); assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height) - pixman_fixed_1 / 2); } else { assert (v1.vector[0] >= pixman_fixed_e); assert (v1.vector[1] >= pixman_fixed_e); assert (v2.vector[0] <= pixman_int_to_fixed (src_img->bits.width)); assert (v2.vector[1] <= pixman_int_to_fixed (src_img->bits.height)); } } static uint32_t test_cover (int testnum, int verbose) { pixman_fixed_t x_scale, y_scale; pixman_bool_t left_align, top_align; pixman_bool_t bilinear; pixman_filter_t filter; pixman_op_t op; size_t src_fmt_index; pixman_format_code_t src_fmt, dst_fmt, mask_fmt; pixman_image_t *src_img, *dst_img, *mask_img; pixman_transform_t src_transform, mask_transform; pixman_fixed_t fuzz[4]; uint32_t crc32; /* We allocate one fenced image for each pixel format up-front. This is to * avoid spending a lot of time on memory management rather than on testing * Pixman optimisations. We need one per thread because the transformation * matrices and filtering are properties of the source and mask images. */ static pixman_image_t *src_imgs[ARRAY_LENGTH (img_fmt_list)]; static pixman_image_t *mask_bits_img; static pixman_bool_t fence_images_created; #ifdef USE_OPENMP #pragma omp threadprivate (src_imgs) #pragma omp threadprivate (mask_bits_img) #pragma omp threadprivate (fence_images_created) #endif if (!fence_images_created) { int i; prng_srand (0); for (i = 0; i < ARRAY_LENGTH (img_fmt_list); i++) src_imgs[i] = create_src_image (img_fmt_list[i]); mask_bits_img = create_src_image (PIXMAN_a8); fence_images_created = TRUE; } prng_srand (testnum); x_scale = random_scale_factor (); y_scale = random_scale_factor (); left_align = prng_rand_n (2); top_align = prng_rand_n (2); bilinear = prng_rand_n (2); filter = bilinear ? 
PIXMAN_FILTER_BILINEAR : PIXMAN_FILTER_NEAREST; op = op_list[prng_rand_n (ARRAY_LENGTH (op_list))]; dst_fmt = img_fmt_list[prng_rand_n (ARRAY_LENGTH (img_fmt_list))]; dst_img = pixman_image_create_bits (dst_fmt, DST_WIDTH, DST_HEIGHT, NULL, 0); prng_randmemset (dst_img->bits.bits, dst_img->bits.rowstride * DST_HEIGHT * sizeof (uint32_t), 0); image_endian_swap (dst_img); src_fmt_index = prng_rand_n (ARRAY_LENGTH (img_fmt_list)); src_fmt = img_fmt_list[src_fmt_index]; src_img = src_imgs[src_fmt_index]; pixman_image_set_filter (src_img, filter, NULL, 0); pixman_transform_init_scale (&src_transform, x_scale, y_scale); src_transform.matrix[0][2] = calc_translate (dst_img->bits.width, src_img->bits.width, x_scale, left_align, bilinear); src_transform.matrix[1][2] = calc_translate (dst_img->bits.height, src_img->bits.height, y_scale, top_align, bilinear); if (prng_rand_n (2)) { /* No mask */ mask_fmt = PIXMAN_null; mask_img = NULL; } else if (prng_rand_n (2)) { /* a8 bitmap mask */ mask_fmt = PIXMAN_a8; mask_img = mask_bits_img; pixman_image_set_filter (mask_img, filter, NULL, 0); pixman_transform_init_scale (&mask_transform, x_scale, y_scale); mask_transform.matrix[0][2] = calc_translate (dst_img->bits.width, mask_img->bits.width, x_scale, left_align, bilinear); mask_transform.matrix[1][2] = calc_translate (dst_img->bits.height, mask_img->bits.height, y_scale, top_align, bilinear); } else { /* Solid mask */ pixman_color_t color; memset (&color, 0xAA, sizeof color); mask_fmt = PIXMAN_solid; mask_img = pixman_image_create_solid_fill (&color); } if (!exact) { int i = 0; while (i < 4) fuzz[i++] = random_offset (); src_transform.matrix[0][2] += fuzz[0]; src_transform.matrix[1][2] += fuzz[1]; mask_transform.matrix[0][2] += fuzz[2]; mask_transform.matrix[1][2] += fuzz[3]; } pixman_image_set_transform (src_img, &src_transform); if (mask_fmt == PIXMAN_a8) pixman_image_set_transform (mask_img, &mask_transform); if (verbose) { printf ("op=%s\n", operator_name (op)); printf ("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name (src_fmt), format_name (dst_fmt), format_name (mask_fmt)); printf ("x_scale=0x%08X, y_scale=0x%08X, align %s/%s, %s\n", x_scale, y_scale, left_align ? "left" : "right", top_align ? "top" : "bottom", bilinear ? "bilinear" : "nearest"); if (!exact) { int i = 0; printf ("fuzz factors"); while (i < 4) printf (" %d", fuzz[i++]); printf ("\n"); } } if (exact) { check_transform (dst_img, src_img, &src_transform, bilinear); if (mask_fmt == PIXMAN_a8) check_transform (dst_img, mask_img, &mask_transform, bilinear); } pixman_image_composite (op, src_img, mask_img, dst_img, 0, 0, 0, 0, 0, 0, dst_img->bits.width, dst_img->bits.height); if (verbose) print_image (dst_img); crc32 = compute_crc32_for_image (0, dst_img); pixman_image_unref (dst_img); if (mask_fmt == PIXMAN_solid) pixman_image_unref (mask_img); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM_FUZZ 0x6B56F607 #define CHECKSUM_EXACT 0xA669F4A3 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM_FUZZ 0x83119ED0 #define CHECKSUM_EXACT 0x0D3382CD #else #define CHECKSUM_FUZZ 0x00000000 #define CHECKSUM_EXACT 0x00000000 #endif int main (int argc, const char *argv[]) { unsigned long page_size; page_size = fence_get_page_size (); if (page_size == 0 || page_size > 16 * 1024) return 77; /* automake SKIP */ exact = getenv ("EXACT") != NULL; if (exact) printf ("Doing plots that are exactly aligned to boundaries\n"); return fuzzer_test_main ("cover", 2000000, exact ? CHECKSUM_EXACT : CHECKSUM_FUZZ, test_cover, argc, argv); }
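test_cover() above caches its fenced images in static variables marked #pragma omp threadprivate, so each worker thread lazily builds and then reuses its own copies across test invocations. A minimal sketch of that idiom (the buffer type and names are invented; the per-thread allocations are deliberately left alive for reuse across parallel regions, as in the test):

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

static double *scratch;            /* per-thread lazily created buffer */
static int scratch_ready;
#pragma omp threadprivate(scratch, scratch_ready)

int main(void)
{
#pragma omp parallel
    {
        if (!scratch_ready) {      /* runs once per thread; no locking needed
                                    * because each thread sees its own copy */
            scratch = malloc(1024 * sizeof(double));
            scratch_ready = 1;
        }
        scratch[0] = omp_get_thread_num();
#pragma omp critical
        printf("thread %d uses buffer %p\n", omp_get_thread_num(),
               (void *)scratch);
    }
    return 0;
}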
/* * This test aims to verify both numerical correctness and the honouring of * array bounds for scaled plots (both nearest-neighbour and bilinear) at or * close to the boundary conditions for applicability of "cover" type fast * paths and iter fetch routines. * * It has a secondary purpose: by setting the env var EXACT (to any value) it * will only test plots that are exactly on the boundary condition. This * makes it possible to ensure that "cover" routines are being used to the * maximum, although this requires the use of a debugger or code * instrumentation to verify. */ #include "utils.h" #include <stdlib.h> #include <stdio.h> /* * Approximate limits for random scale factor generation - these ensure we * can get at least 8x reduction and 8x enlargement. */ #define LOG2_MAX_FACTOR (3) /* 1/sqrt(2) (or sqrt(0.5), or 2^-0.5) as a 0.32 fixed-point number */ #define INV_SQRT_2_0POINT32_FIXED (0xB504F334u) /* * The largest increment that can be generated by random_scale_factor(). This * occurs when the "mantissa" part is 0xFFFFFFFF and the "exponent" part is * -LOG2_MAX_FACTOR. */ #define MAX_INC ((pixman_fixed_t) \ (INV_SQRT_2_0POINT32_FIXED >> (31 - 16 - LOG2_MAX_FACTOR))) /* * Minimum source width (in pixels) based on a typical page size of 4K and * maximum colour depth of 32bpp. */ #define MIN_SRC_WIDTH (4096 / 4) /* Derive the destination width so that at max increment we fit within source */ #define DST_WIDTH (MIN_SRC_WIDTH * pixman_fixed_1 / MAX_INC) /* * Calculate heights the other way round. No limits due to page alignment * here. */ #define DST_HEIGHT 3 #define SRC_HEIGHT ((DST_HEIGHT * MAX_INC + pixman_fixed_1 - 1) / pixman_fixed_1) /* * At the time of writing, all the scaled fast paths use SRC, OVER or ADD * Porter-Duff operators. XOR is included in the list to ensure good * representation of iter scanline fetch routines. */ static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_XOR, }; /* * At the time of writing, all the scaled fast paths use a8r8g8b8, x8r8g8b8 * or r5g6b5, or red-blue swapped versions of the same. When a mask channel * is used, it is always a8 (and so implicitly not component alpha). a1r5g5b5 * is included because it is the only other format to feature in any iters. */ static const pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_r5g6b5, PIXMAN_a1r5g5b5 }; /* * This is a flag reflecting the environment variable EXACT. It can be used * to ensure that source coordinates corresponding exactly to the "cover" * limits are used, rather than any "near misses". This can, for example, be * used in conjunction with a debugger to ensure that only COVER fast paths * are used. */ static int exact; static pixman_image_t * create_src_image(pixman_format_code_t fmt) { pixman_image_t *tmp_img, *img; /* * We need the left-most and right-most MIN_SRC_WIDTH pixels to have * predictable values, even though fence_image_create_bits() may allocate * an image somewhat larger than that, by an amount that varies depending * upon the page size on the current platform. The solution is to create * a temporary non-fenced image that is exactly MIN_SRC_WIDTH wide and * blit it into the fenced image. 
*/ tmp_img = pixman_image_create_bits(fmt, MIN_SRC_WIDTH, SRC_HEIGHT, NULL, 0); if (tmp_img == NULL) return NULL; img = fence_image_create_bits(fmt, MIN_SRC_WIDTH, SRC_HEIGHT, TRUE); if (img == NULL) { pixman_image_unref(tmp_img); return NULL; } prng_randmemset(tmp_img->bits.bits, tmp_img->bits.rowstride * SRC_HEIGHT * sizeof(uint32_t), 0); image_endian_swap(tmp_img); pixman_image_composite(PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, 0, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_composite(PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, img->bits.width - MIN_SRC_WIDTH, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_unref(tmp_img); return img; } static pixman_fixed_t random_scale_factor(void) { /* Get a random number with top bit set. */ uint32_t f = prng_rand() | 0x80000000u; /* * In log(2) space, this is still approximately evenly spread between 31 * and 32. Divide by sqrt(2) to centre the distribution on 2^31. */ f = ((uint64_t) f * INV_SQRT_2_0POINT32_FIXED) >> 32; /* * Now shift right (ie divide by an integer power of 2) to spread the * distribution between centres at 2^(16 +/- LOG2_MAX_FACTOR). */ f >>= 31 - 16 + prng_rand_n(2 * LOG2_MAX_FACTOR + 1) - LOG2_MAX_FACTOR; return f; } static pixman_fixed_t calc_translate(int dst_size, int src_size, pixman_fixed_t scale, pixman_bool_t low_align, pixman_bool_t bilinear) { pixman_fixed_t ref_src, ref_dst, scaled_dst; if (low_align) { ref_src = bilinear ? pixman_fixed_1 / 2 : pixman_fixed_e; ref_dst = pixman_fixed_1 / 2; } else { ref_src = pixman_int_to_fixed(src_size) - bilinear * pixman_fixed_1 / 2; ref_dst = pixman_int_to_fixed(dst_size) - pixman_fixed_1 / 2; } scaled_dst = ((uint64_t) ref_dst * scale + pixman_fixed_1 / 2) / pixman_fixed_1; /* * We need the translation to be set such that when ref_dst is fed * through the transformation matrix, we get ref_src as the result. */ return ref_src - scaled_dst; } static pixman_fixed_t random_offset(void) { pixman_fixed_t offset = 0; /* Ensure we test the exact case quite a lot */ if (prng_rand_n(2)) return offset; /* * What happens when we are close to the edge of the first interpolation * step? 
*/ if (prng_rand_n(2)) offset += (pixman_fixed_1 >> BILINEAR_INTERPOLATION_BITS) - 16; /* Try fine-grained variations */ offset += prng_rand_n(32); /* Test in both directions */ if (prng_rand_n(2)) offset = -offset; return offset; } static void check_transform(pixman_image_t * dst_img, pixman_image_t * src_img, pixman_transform_t * transform, pixman_bool_t bilinear) { pixman_vector_t v1, v2; v1.vector[0] = pixman_fixed_1 / 2; v1.vector[1] = pixman_fixed_1 / 2; v1.vector[2] = pixman_fixed_1; assert(pixman_transform_point(transform, &v1)); v2.vector[0] = pixman_int_to_fixed(dst_img->bits.width) - pixman_fixed_1 / 2; v2.vector[1] = pixman_int_to_fixed(dst_img->bits.height) - pixman_fixed_1 / 2; v2.vector[2] = pixman_fixed_1; assert(pixman_transform_point(transform, &v2)); if (bilinear) { assert(v1.vector[0] >= pixman_fixed_1 / 2); assert(v1.vector[1] >= pixman_fixed_1 / 2); assert(v2.vector[0] <= pixman_int_to_fixed(src_img->bits.width) - pixman_fixed_1 / 2); assert(v2.vector[1] <= pixman_int_to_fixed(src_img->bits.height) - pixman_fixed_1 / 2); } else { assert(v1.vector[0] >= pixman_fixed_e); assert(v1.vector[1] >= pixman_fixed_e); assert(v2.vector[0] <= pixman_int_to_fixed(src_img->bits.width)); assert(v2.vector[1] <= pixman_int_to_fixed(src_img->bits.height)); } } static uint32_t test_cover(int testnum, int verbose) { pixman_fixed_t x_scale, y_scale; pixman_bool_t left_align, top_align; pixman_bool_t bilinear; pixman_filter_t filter; pixman_op_t op; size_t src_fmt_index; pixman_format_code_t src_fmt, dst_fmt, mask_fmt; pixman_image_t *src_img, *dst_img, *mask_img; pixman_transform_t src_transform, mask_transform; pixman_fixed_t fuzz[4]; uint32_t crc32; /* * We allocate one fenced image for each pixel format up-front. This is * to avoid spending a lot of time on memory management rather than on * testing Pixman optimisations. We need one per thread because the * transformation matrices and filtering are properties of the source and * mask images. */ static pixman_image_t *src_imgs[ARRAY_LENGTH(img_fmt_list)]; static pixman_image_t *mask_bits_img; static pixman_bool_t fence_images_created; if (!fence_images_created) { int i; prng_srand(0); for (i = 0; i < ARRAY_LENGTH(img_fmt_list); i++) src_imgs[i] = create_src_image(img_fmt_list[i]); mask_bits_img = create_src_image(PIXMAN_a8); fence_images_created = TRUE; } prng_srand(testnum); x_scale = random_scale_factor(); y_scale = random_scale_factor(); left_align = prng_rand_n(2); top_align = prng_rand_n(2); bilinear = prng_rand_n(2); filter = bilinear ? 
PIXMAN_FILTER_BILINEAR : PIXMAN_FILTER_NEAREST; op = op_list[prng_rand_n(ARRAY_LENGTH(op_list))]; dst_fmt = img_fmt_list[prng_rand_n(ARRAY_LENGTH(img_fmt_list))]; dst_img = pixman_image_create_bits(dst_fmt, DST_WIDTH, DST_HEIGHT, NULL, 0); prng_randmemset(dst_img->bits.bits, dst_img->bits.rowstride * DST_HEIGHT * sizeof(uint32_t), 0); image_endian_swap(dst_img); src_fmt_index = prng_rand_n(ARRAY_LENGTH(img_fmt_list)); src_fmt = img_fmt_list[src_fmt_index]; src_img = src_imgs[src_fmt_index]; pixman_image_set_filter(src_img, filter, NULL, 0); pixman_transform_init_scale(&src_transform, x_scale, y_scale); src_transform.matrix[0][2] = calc_translate(dst_img->bits.width, src_img->bits.width, x_scale, left_align, bilinear); src_transform.matrix[1][2] = calc_translate(dst_img->bits.height, src_img->bits.height, y_scale, top_align, bilinear); if (prng_rand_n(2)) { /* No mask */ mask_fmt = PIXMAN_null; mask_img = NULL; } else if (prng_rand_n(2)) { /* a8 bitmap mask */ mask_fmt = PIXMAN_a8; mask_img = mask_bits_img; pixman_image_set_filter(mask_img, filter, NULL, 0); pixman_transform_init_scale(&mask_transform, x_scale, y_scale); mask_transform.matrix[0][2] = calc_translate(dst_img->bits.width, mask_img->bits.width, x_scale, left_align, bilinear); mask_transform.matrix[1][2] = calc_translate(dst_img->bits.height, mask_img->bits.height, y_scale, top_align, bilinear); } else { /* Solid mask */ pixman_color_t color; memset(&color, 0xAA, sizeof color); mask_fmt = PIXMAN_solid; mask_img = pixman_image_create_solid_fill(&color); } if (!exact) { int i = 0; while (i < 4) fuzz[i++] = random_offset(); src_transform.matrix[0][2] += fuzz[0]; src_transform.matrix[1][2] += fuzz[1]; mask_transform.matrix[0][2] += fuzz[2]; mask_transform.matrix[1][2] += fuzz[3]; } pixman_image_set_transform(src_img, &src_transform); if (mask_fmt == PIXMAN_a8) pixman_image_set_transform(mask_img, &mask_transform); if (verbose) { printf("op=%s\n", operator_name(op)); printf("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name(src_fmt), format_name(dst_fmt), format_name(mask_fmt)); printf("x_scale=0x%08X, y_scale=0x%08X, align %s/%s, %s\n", x_scale, y_scale, left_align ? "left" : "right", top_align ? "top" : "bottom", bilinear ? "bilinear" : "nearest"); if (!exact) { int i = 0; printf("fuzz factors"); while (i < 4) printf(" %d", fuzz[i++]); printf("\n"); } } if (exact) { check_transform(dst_img, src_img, &src_transform, bilinear); if (mask_fmt == PIXMAN_a8) check_transform(dst_img, mask_img, &mask_transform, bilinear); } pixman_image_composite(op, src_img, mask_img, dst_img, 0, 0, 0, 0, 0, 0, dst_img->bits.width, dst_img->bits.height); if (verbose) print_image(dst_img); crc32 = compute_crc32_for_image(0, dst_img); pixman_image_unref(dst_img); if (mask_fmt == PIXMAN_solid) pixman_image_unref(mask_img); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM_FUZZ 0x6B56F607 #define CHECKSUM_EXACT 0xA669F4A3 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM_FUZZ 0x83119ED0 #define CHECKSUM_EXACT 0x0D3382CD #else #define CHECKSUM_FUZZ 0x00000000 #define CHECKSUM_EXACT 0x00000000 #endif int main(int argc, const char *argv[]) { unsigned long page_size; page_size = fence_get_page_size(); if (page_size == 0 || page_size > 16 * 1024) return 77; /* automake SKIP */ exact = getenv("EXACT") != NULL; if (exact) printf("Doing plots that are exactly aligned to boundaries\n"); return fuzzer_test_main("cover", 2000000, exact ? CHECKSUM_EXACT : CHECKSUM_FUZZ, test_cover, argc, argv); }
/* * This test aims to verify both numerical correctness and the honouring of * array bounds for scaled plots (both nearest-neighbour and bilinear) at or * close to the boundary conditions for applicability of "cover" type fast * paths and iter fetch routines. * * It has a secondary purpose: by setting the env var EXACT (to any value) it * will only test plots that are exactly on the boundary condition. This * makes it possible to ensure that "cover" routines are being used to the * maximum, although this requires the use of a debugger or code * instrumentation to verify. */ #include "utils.h" #include <stdlib.h> #include <stdio.h> /* * Approximate limits for random scale factor generation - these ensure we * can get at least 8x reduction and 8x enlargement. */ #define LOG2_MAX_FACTOR (3) /* 1/sqrt(2) (or sqrt(0.5), or 2^-0.5) as a 0.32 fixed-point number */ #define INV_SQRT_2_0POINT32_FIXED (0xB504F334u) /* * The largest increment that can be generated by random_scale_factor(). This * occurs when the "mantissa" part is 0xFFFFFFFF and the "exponent" part is * -LOG2_MAX_FACTOR. */ #define MAX_INC ((pixman_fixed_t) \ (INV_SQRT_2_0POINT32_FIXED >> (31 - 16 - LOG2_MAX_FACTOR))) /* * Minimum source width (in pixels) based on a typical page size of 4K and * maximum colour depth of 32bpp. */ #define MIN_SRC_WIDTH (4096 / 4) /* Derive the destination width so that at max increment we fit within source */ #define DST_WIDTH (MIN_SRC_WIDTH * pixman_fixed_1 / MAX_INC) /* * Calculate heights the other way round. No limits due to page alignment * here. */ #define DST_HEIGHT 3 #define SRC_HEIGHT ((DST_HEIGHT * MAX_INC + pixman_fixed_1 - 1) / pixman_fixed_1) /* * At the time of writing, all the scaled fast paths use SRC, OVER or ADD * Porter-Duff operators. XOR is included in the list to ensure good * representation of iter scanline fetch routines. */ static const pixman_op_t op_list[] = { PIXMAN_OP_SRC, PIXMAN_OP_OVER, PIXMAN_OP_ADD, PIXMAN_OP_XOR, }; /* * At the time of writing, all the scaled fast paths use a8r8g8b8, x8r8g8b8 * or r5g6b5, or red-blue swapped versions of the same. When a mask channel * is used, it is always a8 (and so implicitly not component alpha). a1r5g5b5 * is included because it is the only other format to feature in any iters. */ static const pixman_format_code_t img_fmt_list[] = { PIXMAN_a8r8g8b8, PIXMAN_x8r8g8b8, PIXMAN_r5g6b5, PIXMAN_a1r5g5b5 }; /* * This is a flag reflecting the environment variable EXACT. It can be used * to ensure that source coordinates corresponding exactly to the "cover" * limits are used, rather than any "near misses". This can, for example, be * used in conjunction with a debugger to ensure that only COVER fast paths * are used. */ static int exact; static pixman_image_t * create_src_image(pixman_format_code_t fmt) { pixman_image_t *tmp_img, *img; /* * We need the left-most and right-most MIN_SRC_WIDTH pixels to have * predictable values, even though fence_image_create_bits() may allocate * an image somewhat larger than that, by an amount that varies depending * upon the page size on the current platform. The solution is to create * a temporary non-fenced image that is exactly MIN_SRC_WIDTH wide and * blit it into the fenced image. 
*/ tmp_img = pixman_image_create_bits(fmt, MIN_SRC_WIDTH, SRC_HEIGHT, NULL, 0); if (tmp_img == NULL) return NULL; img = fence_image_create_bits(fmt, MIN_SRC_WIDTH, SRC_HEIGHT, TRUE); if (img == NULL) { pixman_image_unref(tmp_img); return NULL; } prng_randmemset(tmp_img->bits.bits, tmp_img->bits.rowstride * SRC_HEIGHT * sizeof(uint32_t), 0); image_endian_swap(tmp_img); pixman_image_composite(PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, 0, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_composite(PIXMAN_OP_SRC, tmp_img, NULL, img, 0, 0, 0, 0, img->bits.width - MIN_SRC_WIDTH, 0, MIN_SRC_WIDTH, SRC_HEIGHT); pixman_image_unref(tmp_img); return img; } static pixman_fixed_t random_scale_factor(void) { /* Get a random number with top bit set. */ uint32_t f = prng_rand() | 0x80000000u; /* * In log(2) space, this is still approximately evenly spread between 31 * and 32. Divide by sqrt(2) to centre the distribution on 2^31. */ f = ((uint64_t) f * INV_SQRT_2_0POINT32_FIXED) >> 32; /* * Now shift right (ie divide by an integer power of 2) to spread the * distribution between centres at 2^(16 +/- LOG2_MAX_FACTOR). */ f >>= 31 - 16 + prng_rand_n(2 * LOG2_MAX_FACTOR + 1) - LOG2_MAX_FACTOR; return f; } static pixman_fixed_t calc_translate(int dst_size, int src_size, pixman_fixed_t scale, pixman_bool_t low_align, pixman_bool_t bilinear) { pixman_fixed_t ref_src, ref_dst, scaled_dst; if (low_align) { ref_src = bilinear ? pixman_fixed_1 / 2 : pixman_fixed_e; ref_dst = pixman_fixed_1 / 2; } else { ref_src = pixman_int_to_fixed(src_size) - bilinear * pixman_fixed_1 / 2; ref_dst = pixman_int_to_fixed(dst_size) - pixman_fixed_1 / 2; } scaled_dst = ((uint64_t) ref_dst * scale + pixman_fixed_1 / 2) / pixman_fixed_1; /* * We need the translation to be set such that when ref_dst is fed * through the transformation matrix, we get ref_src as the result. */ return ref_src - scaled_dst; } static pixman_fixed_t random_offset(void) { pixman_fixed_t offset = 0; /* Ensure we test the exact case quite a lot */ if (prng_rand_n(2)) return offset; /* * What happens when we are close to the edge of the first interpolation * step? 
*/ if (prng_rand_n(2)) offset += (pixman_fixed_1 >> BILINEAR_INTERPOLATION_BITS) - 16; /* Try fine-grained variations */ offset += prng_rand_n(32); /* Test in both directions */ if (prng_rand_n(2)) offset = -offset; return offset; } static void check_transform(pixman_image_t * dst_img, pixman_image_t * src_img, pixman_transform_t * transform, pixman_bool_t bilinear) { pixman_vector_t v1, v2; v1.vector[0] = pixman_fixed_1 / 2; v1.vector[1] = pixman_fixed_1 / 2; v1.vector[2] = pixman_fixed_1; assert(pixman_transform_point(transform, &v1)); v2.vector[0] = pixman_int_to_fixed(dst_img->bits.width) - pixman_fixed_1 / 2; v2.vector[1] = pixman_int_to_fixed(dst_img->bits.height) - pixman_fixed_1 / 2; v2.vector[2] = pixman_fixed_1; assert(pixman_transform_point(transform, &v2)); if (bilinear) { assert(v1.vector[0] >= pixman_fixed_1 / 2); assert(v1.vector[1] >= pixman_fixed_1 / 2); assert(v2.vector[0] <= pixman_int_to_fixed(src_img->bits.width) - pixman_fixed_1 / 2); assert(v2.vector[1] <= pixman_int_to_fixed(src_img->bits.height) - pixman_fixed_1 / 2); } else { assert(v1.vector[0] >= pixman_fixed_e); assert(v1.vector[1] >= pixman_fixed_e); assert(v2.vector[0] <= pixman_int_to_fixed(src_img->bits.width)); assert(v2.vector[1] <= pixman_int_to_fixed(src_img->bits.height)); } } static uint32_t test_cover(int testnum, int verbose) { pixman_fixed_t x_scale, y_scale; pixman_bool_t left_align, top_align; pixman_bool_t bilinear; pixman_filter_t filter; pixman_op_t op; size_t src_fmt_index; pixman_format_code_t src_fmt, dst_fmt, mask_fmt; pixman_image_t *src_img, *dst_img, *mask_img; pixman_transform_t src_transform, mask_transform; pixman_fixed_t fuzz[4]; uint32_t crc32; /* * We allocate one fenced image for each pixel format up-front. This is * to avoid spending a lot of time on memory management rather than on * testing Pixman optimisations. We need one per thread because the * transformation matrices and filtering are properties of the source and * mask images. */ static pixman_image_t *src_imgs[ARRAY_LENGTH(img_fmt_list)]; static pixman_image_t *mask_bits_img; static pixman_bool_t fence_images_created; #ifdef USE_OPENMP #pragma omp threadprivate (src_imgs) #pragma omp threadprivate (mask_bits_img) #pragma omp threadprivate (fence_images_created) #endif if (!fence_images_created) { int i; prng_srand(0); for (i = 0; i < ARRAY_LENGTH(img_fmt_list); i++) src_imgs[i] = create_src_image(img_fmt_list[i]); mask_bits_img = create_src_image(PIXMAN_a8); fence_images_created = TRUE; } prng_srand(testnum); x_scale = random_scale_factor(); y_scale = random_scale_factor(); left_align = prng_rand_n(2); top_align = prng_rand_n(2); bilinear = prng_rand_n(2); filter = bilinear ? 
PIXMAN_FILTER_BILINEAR : PIXMAN_FILTER_NEAREST; op = op_list[prng_rand_n(ARRAY_LENGTH(op_list))]; dst_fmt = img_fmt_list[prng_rand_n(ARRAY_LENGTH(img_fmt_list))]; dst_img = pixman_image_create_bits(dst_fmt, DST_WIDTH, DST_HEIGHT, NULL, 0); prng_randmemset(dst_img->bits.bits, dst_img->bits.rowstride * DST_HEIGHT * sizeof(uint32_t), 0); image_endian_swap(dst_img); src_fmt_index = prng_rand_n(ARRAY_LENGTH(img_fmt_list)); src_fmt = img_fmt_list[src_fmt_index]; src_img = src_imgs[src_fmt_index]; pixman_image_set_filter(src_img, filter, NULL, 0); pixman_transform_init_scale(&src_transform, x_scale, y_scale); src_transform.matrix[0][2] = calc_translate(dst_img->bits.width, src_img->bits.width, x_scale, left_align, bilinear); src_transform.matrix[1][2] = calc_translate(dst_img->bits.height, src_img->bits.height, y_scale, top_align, bilinear); if (prng_rand_n(2)) { /* No mask */ mask_fmt = PIXMAN_null; mask_img = NULL; } else if (prng_rand_n(2)) { /* a8 bitmap mask */ mask_fmt = PIXMAN_a8; mask_img = mask_bits_img; pixman_image_set_filter(mask_img, filter, NULL, 0); pixman_transform_init_scale(&mask_transform, x_scale, y_scale); mask_transform.matrix[0][2] = calc_translate(dst_img->bits.width, mask_img->bits.width, x_scale, left_align, bilinear); mask_transform.matrix[1][2] = calc_translate(dst_img->bits.height, mask_img->bits.height, y_scale, top_align, bilinear); } else { /* Solid mask */ pixman_color_t color; memset(&color, 0xAA, sizeof color); mask_fmt = PIXMAN_solid; mask_img = pixman_image_create_solid_fill(&color); } if (!exact) { int i = 0; while (i < 4) fuzz[i++] = random_offset(); src_transform.matrix[0][2] += fuzz[0]; src_transform.matrix[1][2] += fuzz[1]; mask_transform.matrix[0][2] += fuzz[2]; mask_transform.matrix[1][2] += fuzz[3]; } pixman_image_set_transform(src_img, &src_transform); if (mask_fmt == PIXMAN_a8) pixman_image_set_transform(mask_img, &mask_transform); if (verbose) { printf("op=%s\n", operator_name(op)); printf("src_fmt=%s, dst_fmt=%s, mask_fmt=%s\n", format_name(src_fmt), format_name(dst_fmt), format_name(mask_fmt)); printf("x_scale=0x%08X, y_scale=0x%08X, align %s/%s, %s\n", x_scale, y_scale, left_align ? "left" : "right", top_align ? "top" : "bottom", bilinear ? "bilinear" : "nearest"); if (!exact) { int i = 0; printf("fuzz factors"); while (i < 4) printf(" %d", fuzz[i++]); printf("\n"); } } if (exact) { check_transform(dst_img, src_img, &src_transform, bilinear); if (mask_fmt == PIXMAN_a8) check_transform(dst_img, mask_img, &mask_transform, bilinear); } pixman_image_composite(op, src_img, mask_img, dst_img, 0, 0, 0, 0, 0, 0, dst_img->bits.width, dst_img->bits.height); if (verbose) print_image(dst_img); crc32 = compute_crc32_for_image(0, dst_img); pixman_image_unref(dst_img); if (mask_fmt == PIXMAN_solid) pixman_image_unref(mask_img); return crc32; } #if BILINEAR_INTERPOLATION_BITS == 7 #define CHECKSUM_FUZZ 0x6B56F607 #define CHECKSUM_EXACT 0xA669F4A3 #elif BILINEAR_INTERPOLATION_BITS == 4 #define CHECKSUM_FUZZ 0x83119ED0 #define CHECKSUM_EXACT 0x0D3382CD #else #define CHECKSUM_FUZZ 0x00000000 #define CHECKSUM_EXACT 0x00000000 #endif int main(int argc, const char *argv[]) { unsigned long page_size; page_size = fence_get_page_size(); if (page_size == 0 || page_size > 16 * 1024) return 77; /* automake SKIP */ exact = getenv("EXACT") != NULL; if (exact) printf("Doing plots that are exactly aligned to boundaries\n"); return fuzzer_test_main("cover", 2000000, exact ? CHECKSUM_EXACT : CHECKSUM_FUZZ, test_cover, argc, argv); }
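The only difference between the omp_formatted variant above and the plain one is the #pragma omp threadprivate block on the static fence images, so that each OpenMP worker running test_cover lazily builds its own set instead of racing on shared statics. A minimal, self-contained sketch of that lazy per-thread initialisation pattern (buf, initialized and use_buf are illustrative names):

/* Minimal sketch of the threadprivate lazy-init pattern used above for
 * src_imgs / mask_bits_img / fence_images_created. */
#include <stdio.h>

static int buf[4];
static int initialized;
#ifdef _OPENMP
#pragma omp threadprivate(buf, initialized)
#endif

static void use_buf(void)
{
    if (!initialized) {          /* runs once per thread, not once total */
        int i;
        for (i = 0; i < 4; i++)
            buf[i] = i;
        initialized = 1;
    }
}

int main(void)
{
    #pragma omp parallel
    use_buf();                   /* each thread initialises its own copy */
    printf("%d\n", buf[3]);      /* prints the master thread's copy: 3 */
    return 0;
}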
serialized.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <math.h> int main() { omp_set_nested(0); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); void *creator_frame = get_frame_address(0); int t = (int)sin(0.1); #pragma omp task if (t) { void *task_frame = get_frame_address(0); if (creator_frame == task_frame) { // Assume this code was inlined which the compiler is allowed to do. print_frame(0); } else { // The exit frame must be our parent! print_frame_from_outlined_fn(1); } print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[NULL]] // CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0) // CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]] // CHECK-SAME: parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[MAIN_REENTER]] // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2 // CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}} // nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]], // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: parent_task_frame.exit=[[EXIT]] // CHECK-SAME: parent_task_frame.reenter=[[REENTER]] // CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]] // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: // CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[REENTER]] // CHECK: {{^}}[[MASTER_ID]]: task level 2 // CHECK-SAME: 
parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule // CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end // parallel_id is 0 because the region ended in the barrier! // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // parallel_id is 0 because the region ended in the barrier! // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] return 0; }
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <math.h> int main() { omp_set_nested(0); print_frame(0); print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); void *creator_frame = get_frame_address(0); int t = (int)sin(0.1); #pragma omp task if (t) { void *task_frame = get_frame_address(0); if (creator_frame == task_frame) { // Assume this code was inlined which the compiler is allowed to do. print_frame(0); } else { // The exit frame must be our parent! print_frame_from_outlined_fn(1); } print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); print_ids(0); } print_ids(0); // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[NULL]] // CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0) // CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]] // CHECK-SAME: parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[MAIN_REENTER]] // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2 // CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}} // nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]], // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: parent_task_frame.exit=[[EXIT]] // CHECK-SAME: parent_task_frame.reenter=[[REENTER]] // CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]] // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: // CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[REENTER]] // CHECK: {{^}}[[MASTER_ID]]: task level 2 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // 
CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule // CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end // parallel_id is 0 because the region ended in the barrier! // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // parallel_id is 0 because the region ended in the barrier! // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] return 0; }
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7 #define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN #include "callback.h" #include <omp.h> #include <math.h> int main() { omp_set_nested(0); print_frame(0); #pragma omp parallel num_threads(2) { print_frame_from_outlined_fn(1); print_ids(0); print_ids(1); print_frame(0); #pragma omp master { print_ids(0); void *creator_frame = get_frame_address(0); int t = (int)sin(0.1); #pragma omp task if (t) { void *task_frame = get_frame_address(0); if (creator_frame == task_frame) { // Assume this code was inlined which the compiler is allowed to do. print_frame(0); } else { // The exit frame must be our parent! print_frame_from_outlined_fn(1); } print_ids(0); print_ids(1); print_ids(2); } print_fuzzy_address(1); print_ids(0); } print_ids(0); } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // make sure initial data pointers are null // CHECK-NOT: 0: new_task_data initially not null // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create // CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[NULL]] // CHECK-SAME: new_task_id={{[0-9]+}}, codeptr_ra=[[NULL]] // CHECK-SAME: task_type=ompt_task_initial=1, has_dependences=no // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0) // CHECK-SAME: =[[MAIN_REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin // CHECK-SAME: parent_task_id=[[PARENT_TASK_ID:[0-9]+]] // CHECK-SAME: parent_task_frame.exit=[[NULL]] // CHECK-SAME: parent_task_frame.reenter=[[MAIN_REENTER]] // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2 // CHECK-SAME: codeptr_ra=0x{{[0-f]+}}, invoker={{[0-9]+}} // nested parallel masters // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]], // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create // CHECK-SAME: parent_task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: parent_task_frame.exit=[[EXIT]] // CHECK-SAME: parent_task_frame.reenter=[[REENTER]] // CHECK-SAME: new_task_id=[[TASK_ID:[0-9]+]] // CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}} // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule: // CHECK-SAME: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address // CHECK-SAME: =[[TASK_EXIT:0x[0-f]+]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: task level 1 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[REENTER]] // CHECK: {{^}}[[MASTER_ID]]: task level 2 // CHECK-SAME: 
parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_schedule // CHECK-SAME: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // implicit barrier parallel // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end // parallel_id is 0 because the region ended in the barrier! // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]] // CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address // CHECK-SAME: =[[EXIT:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[EXIT]], reenter_frame=[[NULL]] // CHECK: {{^}}[[THREAD_ID]]: task level 1 // CHECK-SAME: parallel_id=[[IMPLICIT_PARALLEL_ID]] // CHECK-SAME: task_id=[[PARENT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[MAIN_REENTER]] // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)={{0x[0-f]+}} // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: task level 0 // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]] // CHECK-SAME: exit_frame=[[NULL]], reenter_frame=[[NULL]] // parallel_id is 0 because the region ended in the barrier! // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end // CHECK-SAME: parallel_id=0, task_id=[[IMPLICIT_TASK_ID]] return 0; }
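serialized.c above leans on a guaranteed OpenMP behaviour: the task's if clause is computed as (int)sin(0.1), which is 0 but opaque enough that the compiler cannot fold the branch away, and a task whose if clause evaluates to false is undeferred, i.e. the encountering thread suspends and executes the task body immediately before continuing past the construct. A minimal sketch of that guarantee, with no OMPT involved:

/* Minimal sketch of the undeferred-task behaviour serialized.c relies on:
 * with a false if() clause, the encountering thread runs the task body
 * right away, so its effects are visible immediately afterwards, without
 * any taskwait. */
#include <stdio.h>

int main(void)
{
    #pragma omp parallel num_threads(2)
    #pragma omp master
    {
        int executed = 0;
        #pragma omp task if(0) shared(executed)
        {
            executed = 1;        /* runs on the encountering thread, now */
        }
        /* no taskwait needed: an undeferred task has already completed */
        printf("executed=%d\n", executed);
    }
    return 0;
}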
GB_unop__minv_uint8_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint8_uint8) // op(A') function: GB (_unop_tran__minv_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint8_uint8) // op(A') function: GB (_unop_tran__minv_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ; #else for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_uint8_uint8) // op(A') function: GB (_unop_tran__minv_uint8_uint8) // C type: uint8_t // A type: uint8_t // cast: uint8_t cij = aij // unaryop: cij = GB_IMINV_UNSIGNED (aij, 8) #define GB_ATYPE \ uint8_t #define GB_CTYPE \ uint8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_UNSIGNED (x, 8) ; // casting #define GB_CAST(z, aij) \ uint8_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint8_t z = aij ; \ Cx [pC] = GB_IMINV_UNSIGNED (z, 8) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_uint8_uint8) ( uint8_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; uint8_t z = aij ; Cx [p] = GB_IMINV_UNSIGNED (z, 8) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_uint8_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
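Stripped of the macros, every GB (_unop_apply__*) kernel in these rows has the same shape: one flat elementwise loop, statically scheduled across a caller-chosen number of threads, plus a bitmap variant that skips non-entries. A self-contained sketch of that shape (apply_kernel and my_op are illustrative stand-ins; my_op replaces GB_IMINV_UNSIGNED just to keep the sketch trivial):

#include <stdint.h>

/* Stand-in unary op; the real kernel uses GB_IMINV_UNSIGNED (aij, 8). */
static inline uint8_t my_op (uint8_t x) { return (uint8_t) ~x ; }

static void apply_kernel (uint8_t *Cx, const uint8_t *Ax,
                          const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p ;
    if (Ab == NULL)
    {
        /* full case: every entry present */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            Cx [p] = my_op (Ax [p]) ;
        }
    }
    else
    {
        /* bitmap case: Ab [p] == 0 marks a hole, skip it */
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            Cx [p] = my_op (Ax [p]) ;
        }
    }
}

int main (void)
{
    uint8_t A [4] = { 1, 2, 3, 4 }, C [4] ;
    apply_kernel (C, A, NULL, 4, 2) ;
    return (C [0] == (uint8_t) ~1) ? 0 : 1 ;
}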
GB_unaryop__abs_int16_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_uint64 // op(A') function: GB_tran__abs_int16_uint64 // C type: int16_t // A type: uint64_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_uint64 ( int16_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_uint64 // op(A') function: GB_tran__abs_int16_uint64 // C type: int16_t // A type: uint64_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_uint64 ( int16_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_uint64 // op(A') function: GB_tran__abs_int16_uint64 // C type: int16_t // A type: uint64_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_uint64 ( int16_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
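The per-type kernels are generated by textual macro expansion; for GB_unop__abs_int16_uint64 above, the loop body GB_CAST_OP (p, p) expands, step by step, into a load (GB_GETA), a typecast (GB_CASTING) and the operator (GB_OP). A hand-expanded, compilable version for illustration (GB_IABS is given a plain definition here so the sketch stands alone; the real one lives in the GraphBLAS headers):

#include <stdint.h>

/* Plain stand-in for GraphBLAS's integer abs, so this sketch compiles. */
#define GB_IABS(x) (((x) >= 0) ? (x) : (-(x)))

void unop_abs_int16_uint64_expanded (int16_t *Cx, const uint64_t *Ax,
                                     int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;         /* GB_GETA    */
        int16_t  x   = (int16_t) aij ;  /* GB_CASTING */
        Cx [p] = GB_IABS (x) ;          /* GB_OP into GB_CX (p) */
    }
}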
GB_unaryop__lnot_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int16 // op(A') function: GB_tran__lnot_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int16 // op(A') function: GB_tran__lnot_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_bool_int16 // op(A') function: GB_tran__lnot_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = !aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_bool_int16 ( bool *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
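Each of these generated files can also be compiled out wholesale: when GB_DISABLE is true (any of the GxB_NO_* controls set), the kernel returns GrB_NO_VALUE and the caller is expected to fall back to a generic, non-specialized path. A one-file sketch of that dispatch pattern (the GrB_Info enum and all names here are illustrative stand-ins, not the GraphBLAS headers):

#include <stdint.h>
#include <stdio.h>

typedef enum { GrB_SUCCESS = 0, GrB_NO_VALUE = 1 } GrB_Info ;

#define GB_DISABLE 0    /* flip to 1 to compile the fast kernel out */

static GrB_Info specialized_kernel (int16_t *Cx, const int16_t *Ax,
                                    int64_t anz)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    for (int64_t p = 0 ; p < anz ; p++) Cx [p] = (int16_t) !Ax [p] ;
    return (GrB_SUCCESS) ;
    #endif
}

static void generic_fallback (int16_t *Cx, const int16_t *Ax, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++) Cx [p] = (int16_t) !Ax [p] ;
}

int main (void)
{
    int16_t A [3] = { 0, 7, 0 }, C [3] ;
    if (specialized_kernel (C, A, 3) == GrB_NO_VALUE)
        generic_fallback (C, A, 3) ;            /* generic path */
    printf ("%d %d %d\n", C [0], C [1], C [2]) ;
    return 0 ;
}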
has160_fmt_plug.c
/* HAS160-512 cracker patch for JtR. Hacked together during May, 2015 * by Dhiru Kholia <dhiru.kholia at gmail.com>. * * Thanks for RHash, http://www.randombit.net/has160.html and * https://github.com/maciejczyzewski/retter for the code. */ #if FMT_EXTERNS_H extern struct fmt_main fmt__HAS160; #elif FMT_REGISTERS_H john_register_one(&fmt__HAS160); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "has160.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 64 #else #define OMP_SCALE 2048 #endif // __MIC__ #endif // OMP_SCALE #include <omp.h> #endif // _OPENMP #include "memdbg.h" #define FORMAT_LABEL "has-160" #define FORMAT_NAME "" #define ALGORITHM_NAME "HAS-160 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 40 #define BINARY_SIZE 20 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"307964ef34151d37c8047adec7ab50f4ff89762d", ""}, {"cb5d7efbca2f02e0fb7167cabb123af5795764e5", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, {"4872bcbc4cd0f0a9dc7c2f7045e5b43b6c830db8", "a"}, {"975e810488cf2a3d49838478124afce4b1c78804", "abc"}, {"2338dbc8638d31225f73086246ba529f96710bc6", "message digest"}, {"596185c9ab6703d0d0dbb98702bc0f5729cd1d3c", "abcdefghijklmnopqrstuvwxyz"}, {"07f05c8c0773c55ca3a5a695ce6aca4c438911b5", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[(BINARY_SIZE) / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; q = p; while (atoi16l[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static void *get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; saved_key[index][len] 
= 0; memcpy(saved_key[index], key, len); } static char *get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { has160_ctx ctx; rhash_has160_init(&ctx); rhash_has160_update(&ctx, (unsigned char*)saved_key[index], saved_len[index]); rhash_has160_final(&ctx, (unsigned char*)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt__HAS160 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
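The plugin's entire hashing surface is the rhash_has160_init/update/final triple called from crypt_all(). A minimal standalone check of one tests[] vector, a sketch assuming only the has160.h header the plugin already includes:

#include <stdio.h>
#include <string.h>
#include "has160.h"

int main(void)
{
    /* message and expected digest copied from the tests[] table above */
    const char *msg = "abc";
    const char *want = "975e810488cf2a3d49838478124afce4b1c78804";
    unsigned char out[20];
    char hex[41];
    int i;
    has160_ctx ctx;

    rhash_has160_init(&ctx);
    rhash_has160_update(&ctx, (const unsigned char *)msg, strlen(msg));
    rhash_has160_final(&ctx, out);
    for (i = 0; i < 20; i++)
        sprintf(hex + 2 * i, "%02x", out[i]);
    printf("%s (%s)\n", hex, strcmp(hex, want) ? "FAIL" : "ok");
    return 0;
}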
/* * HAS160-512 cracker patch for JtR. Hacked together during May, 2015 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * Thanks for RHash, http://www.randombit.net/has160.html and * https://github.com/maciejczyzewski/retter for the code. */ #if FMT_EXTERNS_H extern struct fmt_main fmt__HAS160; #elif FMT_REGISTERS_H john_register_one(&fmt__HAS160); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "has160.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #include "memdbg.h" #define FORMAT_LABEL "has-160" #define FORMAT_NAME "" #define ALGORITHM_NAME "HAS-160 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 40 #define BINARY_SIZE 20 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"307964ef34151d37c8047adec7ab50f4ff89762d", ""}, {"cb5d7efbca2f02e0fb7167cabb123af5795764e5", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, {"4872bcbc4cd0f0a9dc7c2f7045e5b43b6c830db8", "a"}, {"975e810488cf2a3d49838478124afce4b1c78804", "abc"}, {"2338dbc8638d31225f73086246ba529f96710bc6", "message digest"}, {"596185c9ab6703d0d0dbb98702bc0f5729cd1d3c", "abcdefghijklmnopqrstuvwxyz"}, {"07f05c8c0773c55ca3a5a695ce6aca4c438911b5", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[(BINARY_SIZE) / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; q = p; while (atoi16l[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static void * get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; saved_key[index][len] = 0; memcpy(saved_key[index], key, len); } static char * get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; for (index = 0; index < count; index++) { has160_ctx ctx; rhash_has160_init(&ctx); 
rhash_has160_update(&ctx, (unsigned char *)saved_key[index], saved_len[index]); rhash_has160_final(&ctx, (unsigned char *)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt__HAS160 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, fmt_default_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
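get_binary() turns the 40-character hex ciphertext into 20 raw bytes, two nibbles per byte, via atoi16[] table lookups. The same conversion with an explicit helper in place of the table (nibble() is a hypothetical stand-in, not part of the plugin):

#include <stdio.h>

static int nibble(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    return -1;
}

int main(void)
{
    /* digest of "abc" from the tests[] table */
    const char *cipher = "975e810488cf2a3d49838478124afce4b1c78804";
    unsigned char bin[20];
    int i;
    for (i = 0; i < 20; i++)
        bin[i] = (unsigned char)((nibble(cipher[2 * i]) << 4) |
                                  nibble(cipher[2 * i + 1]));
    printf("bin[0] = 0x%02x\n", bin[0]);   /* 0x97 */
    return 0;
}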
/* * HAS160-512 cracker patch for JtR. Hacked together during May, 2015 by * Dhiru Kholia <dhiru.kholia at gmail.com>. * * Thanks for RHash, http://www.randombit.net/has160.html and * https://github.com/maciejczyzewski/retter for the code. */ #if FMT_EXTERNS_H extern struct fmt_main fmt__HAS160; #elif FMT_REGISTERS_H john_register_one(&fmt__HAS160); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "has160.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 64 #else #define OMP_SCALE 2048 #endif /* // __MIC__ */ #endif /* // OMP_SCALE */ #include <omp.h> #endif /* // _OPENMP */ #include "memdbg.h" #define FORMAT_LABEL "has-160" #define FORMAT_NAME "" #define ALGORITHM_NAME "HAS-160 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define CIPHERTEXT_LENGTH 40 #define BINARY_SIZE 20 #define SALT_SIZE 0 #define BINARY_ALIGN 4 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests tests[] = { {"307964ef34151d37c8047adec7ab50f4ff89762d", ""}, {"cb5d7efbca2f02e0fb7167cabb123af5795764e5", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"}, {"4872bcbc4cd0f0a9dc7c2f7045e5b43b6c830db8", "a"}, {"975e810488cf2a3d49838478124afce4b1c78804", "abc"}, {"2338dbc8638d31225f73086246ba529f96710bc6", "message digest"}, {"596185c9ab6703d0d0dbb98702bc0f5729cd1d3c", "abcdefghijklmnopqrstuvwxyz"}, {"07f05c8c0773c55ca3a5a695ce6aca4c438911b5", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, {NULL} }; static int (*saved_len); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32(*crypt_out)[(BINARY_SIZE) / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_len = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_len)); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); MEM_FREE(saved_len); } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; q = p; while (atoi16l[ARCH_INDEX(*q)] != 0x7F) q++; return !*q && q - p == CIPHERTEXT_LENGTH; } static void * get_binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = ciphertext; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_key(char *key, int index) { int len = strlen(key); saved_len[index] = len; if (len > PLAINTEXT_LENGTH) len = saved_len[index] = PLAINTEXT_LENGTH; 
saved_key[index][len] = 0; memcpy(saved_key[index], key, len); } static char * get_key(int index) { saved_key[index][saved_len[index]] = 0; return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { has160_ctx ctx; rhash_has160_init(&ctx); rhash_has160_update(&ctx, (unsigned char *)saved_key[index], saved_len[index]); rhash_has160_final(&ctx, (unsigned char *)crypt_out[index]); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt__HAS160 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, {NULL}, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, fmt_default_salt, {NULL}, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
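In the OpenMP build, init() scales the batch size: min_keys_per_crypt grows by the thread count, max_keys_per_crypt by thread count times OMP_SCALE, so each crypt_all() call hands every thread a large slice of candidates. A sketch of just that arithmetic, assuming the non-MIC OMP_SCALE of 2048:

#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define OMP_SCALE 2048              /* non-MIC default from the plugin */

int main(void)
{
    int min_kpc = 1, max_kpc = 1;   /* MIN_/MAX_KEYS_PER_CRYPT */
#ifdef _OPENMP
    int omp_t = omp_get_max_threads();
    min_kpc *= omp_t;               /* same order of operations as init() */
    omp_t *= OMP_SCALE;
    max_kpc *= omp_t;
#endif
    printf("min=%d max=%d keys per crypt_all()\n", min_kpc, max_kpc);
    return 0;
}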
regex-dna.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Based on C contribution of Mike Pall // Contributed by The Anh Tran /* http://benchmarksgame.alioth.debian.org/u64q/program.php?test=regexdna&lang=gcc&id=4 usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp regexdna.gcc-4.c -o regexdna.gcc-4.gcc_run -lpcre ./regexdna.gcc-4.gcc_run 0 < regexdna-input5000000.txt */ #define _GNU_SOURCE #include <omp.h> #include <sched.h> #include <pcre.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> // read all redirected data from stdin // strip DNA headers and newline characters char* ReadInput_StripHeader( size_t *file_size, size_t *strip_size ) { // get input size *file_size = ftell(stdin); fseek(stdin, 0, SEEK_END); *file_size = ftell(stdin) - *file_size; fseek(stdin, 0, SEEK_SET); *strip_size = 0; // load original content into memory char* input = (char*)malloc(*file_size +1); assert(input != 0); { size_t sz = fread(input, 1, *file_size, stdin); assert(sz == *file_size); input[*file_size] = 0; } // alloc space for regex_replace char* output = (char*)malloc(*file_size); assert(output != 0); const char* re_error; int re_erroff; // compile pattern pcre* re = pcre_compile(">.*\\n|\\n", 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; // regex_replace for( position = 0; pcre_exec(re, re_extra, input, *file_size, position, 0, match, 3) >= 0; position = match[1] ) { int char_to_copy = match[0] - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; } // copy remain part int char_to_copy = *file_size - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; free(input); pcre_free(re_extra); pcre_free(re); return output; } void Count_Patterns(char const* input, size_t input_len, char* result) { static char const* ptns[] = { "agggtaaa|tttaccct", "[cgt]gggtaaa|tttaccc[acg]", "a[act]ggtaaa|tttacc[agt]t", "ag[act]gtaaa|tttac[agt]ct", "agg[act]taaa|ttta[agt]cct", "aggg[acg]aaa|ttt[cgt]ccct", "agggt[cgt]aa|tt[acg]accct", "agggta[cgt]a|t[acg]taccct", "agggtaa[cgt]|[acg]ttaccct" }; static const int n_ptns = sizeof(ptns) / sizeof(ptns[0]); static size_t counters[9]; int i; #pragma omp for schedule(dynamic, 1) nowait for (i = 0; i < n_ptns; ++i) { const char* re_error = 0; int re_erroff = 0; pcre* re = pcre_compile(ptns[i], 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position, count; int match[3]; // regex_search for( position = count = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1] ) ++count; counters[i] = count; pcre_free(re_extra); pcre_free(re); } // we want the last thread, reaching this code block, to print result static size_t thread_passed = 0; if (__sync_add_and_fetch(&thread_passed, 1) == (size_t)omp_get_num_threads() ) { int plen = 0; int i; for (i = 0; i < n_ptns; ++i) plen += sprintf(result + plen, "%s %d\n", ptns[i], counters[i]); thread_passed = 0; } } typedef struct IUB_T { const char* iub; int len; } IUB; IUB const iub_table[] = { {0}, {"(c|g|t)", 7}, {0}, {"(a|g|t)", 7}, {0}, {0}, {0}, {"(a|c|t)", 7}, {0}, {0}, {"(g|t)", 5}, {0}, {"(a|c)", 5}, {"(a|c|g|t)", 9}, {0}, {0}, {0}, {"(a|g)", 5}, {"(c|t)", 5}, {0}, {0}, {"(a|c|g)", 7}, {"(a|t)", 5}, {0}, {"(c|t)", 5} }; int const n_iub = sizeof(iub_table)/sizeof(iub_table[0]); void 
Replace_Patterns(char const* input, size_t input_len, size_t* repl_len) { #pragma omp single nowait { // input_len * 1.5 char* output = (char*)malloc(input_len + (input_len >> 1)); assert(output != 0); const char* re_error = 0; int re_erroff = 0; pcre* re = pcre_compile("[BDHKMNRSVWY]", 0, &re_error, &re_erroff, 0); pcre_extra* re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; int replace_len = 0; // regex_replace for( position = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1] ) { int char_to_copy = match[0] - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; IUB const* i_r = iub_table + (input[match[0]] - 'A'); char_to_copy = i_r->len; memcpy(output + replace_len, i_r->iub, char_to_copy); replace_len += char_to_copy; } // copy remain part int char_to_copy = input_len - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; free(output); pcre_free(re_extra); pcre_free(re); *repl_len = replace_len; } } // Detect single - multi thread benchmark int GetThreadCount() { cpu_set_t cs; int count = 0; int i; CPU_ZERO(&cs); sched_getaffinity(0, sizeof(cs), &cs); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cs)) ++count; } return count; } int main() { size_t initial_length = 0; size_t striped_length = 0; size_t replace_length = 0; char* input = ReadInput_StripHeader (&initial_length, &striped_length); char match_result[1024]; #pragma omp parallel default(shared) num_threads(GetThreadCount()) { Count_Patterns (input, striped_length, match_result); Replace_Patterns(input, striped_length, &replace_length); } printf("%s\n%d\n%d\n%d\n", match_result, initial_length, striped_length, replace_length ); free(input); return 0; }
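Count_Patterns() is written to run inside an already-open parallel region: the pattern loop is a worksharing `for` with `nowait`, and the serial epilogue is gated so only the last thread to arrive prints. A self-contained sketch of that gate (the squaring loop is a hypothetical stand-in for the per-pattern work); build with gcc -fopenmp:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    static int done[8];
    static size_t threads_passed = 0;     /* same trick as Count_Patterns */
    int i;
    #pragma omp parallel
    {
        /* iterations handed out one at a time; nowait skips the barrier */
        #pragma omp for schedule(dynamic, 1) nowait
        for (i = 0; i < 8; ++i)
            done[i] = i * i;

        /* last thread through the gate runs the serial epilogue */
        if (__sync_add_and_fetch(&threads_passed, 1) ==
            (size_t)omp_get_num_threads()) {
            int k;
            for (k = 0; k < 8; ++k)
                printf("%d ", done[k]);
            printf("\n");
        }
    }
    return 0;
}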
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Based on C contribution of Mike Pall // Contributed by The Anh Tran /* * http://benchmarksgame.alioth.debian.org/u64q/program.php?test=regexdna&lang=gcc&id=4 * * usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp * regexdna.gcc-4.c -o regexdna.gcc-4.gcc_run -lpcre * * ./regexdna.gcc-4.gcc_run 0 < regexdna-input5000000.txt */ #define _GNU_SOURCE #include <omp.h> #include <sched.h> #include <pcre.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> // read all redirected data from stdin // strip DNA headers and newline characters char * ReadInput_StripHeader(size_t * file_size, size_t * strip_size) { //get input size * file_size = ftell(stdin); fseek(stdin, 0, SEEK_END); *file_size = ftell(stdin) - *file_size; fseek(stdin, 0, SEEK_SET); *strip_size = 0; //load original content into memory char *input = (char *)malloc(*file_size + 1); assert(input != 0); { size_t sz = fread(input, 1, *file_size, stdin); assert(sz == *file_size); input[*file_size] = 0; } //alloc space for regex_replace char *output = (char *)malloc(*file_size); assert(output != 0); const char *re_error; int re_erroff; //compile pattern pcre * re = pcre_compile(">.*\\n|\\n", 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; //regex_replace for (position = 0; pcre_exec(re, re_extra, input, *file_size, position, 0, match, 3) >= 0; position = match[1]) { int char_to_copy = match[0] - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; } //copy remain part int char_to_copy = *file_size - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; free(input); pcre_free(re_extra); pcre_free(re); return output; } void Count_Patterns(char const *input, size_t input_len, char *result) { static char const *ptns[] = { "agggtaaa|tttaccct", "[cgt]gggtaaa|tttaccc[acg]", "a[act]ggtaaa|tttacc[agt]t", "ag[act]gtaaa|tttac[agt]ct", "agg[act]taaa|ttta[agt]cct", "aggg[acg]aaa|ttt[cgt]ccct", "agggt[cgt]aa|tt[acg]accct", "agggta[cgt]a|t[acg]taccct", "agggtaa[cgt]|[acg]ttaccct" }; static const int n_ptns = sizeof(ptns) / sizeof(ptns[0]); static size_t counters[9]; int i; for (i = 0; i < n_ptns; ++i) { const char *re_error = 0; int re_erroff = 0; pcre *re = pcre_compile(ptns[i], 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position, count; int match[3]; //regex_search for (position = count = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1]) ++count; counters[i] = count; pcre_free(re_extra); pcre_free(re); } //we want the last thread, reaching this code block, to print result static size_t thread_passed = 0; if (__sync_add_and_fetch(&thread_passed, 1) == (size_t) omp_get_num_threads()) { int plen = 0; int i; for (i = 0; i < n_ptns; ++i) plen += sprintf(result + plen, "%s %d\n", ptns[i], counters[i]); thread_passed = 0; } } typedef struct IUB_T { const char *iub; int len; } IUB; IUB const iub_table[] = { {0}, {"(c|g|t)", 7}, {0}, {"(a|g|t)", 7}, {0}, {0}, {0}, {"(a|c|t)", 7}, {0}, {0}, {"(g|t)", 5}, {0}, {"(a|c)", 5}, {"(a|c|g|t)", 9}, {0}, {0}, {0}, {"(a|g)", 5}, {"(c|t)", 5}, {0}, {0}, {"(a|c|g)", 7}, {"(a|t)", 5}, {0}, {"(c|t)", 5} }; int const n_iub = sizeof(iub_table) / sizeof(iub_table[0]); void Replace_Patterns(char const *input,
size_t input_len, size_t * repl_len) { //input_len * 1.5 char *output = (char *)malloc(input_len + (input_len >> 1)); assert(output != 0); const char *re_error = 0; int re_erroff = 0; pcre *re = pcre_compile("[BDHKMNRSVWY]", 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; int replace_len = 0; //regex_replace for (position = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1]) { int char_to_copy = match[0] - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; IUB const *i_r = iub_table + (input[match[0]] - 'A'); char_to_copy = i_r->len; memcpy(output + replace_len, i_r->iub, char_to_copy); replace_len += char_to_copy; } //copy remain part int char_to_copy = input_len - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; free(output); pcre_free(re_extra); pcre_free(re); *repl_len = replace_len; } //Detect single - multi thread benchmark int GetThreadCount() { cpu_set_t cs; int count = 0; int i; CPU_ZERO(&cs); sched_getaffinity(0, sizeof(cs), &cs); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cs)) ++count; } return count; } int main() { size_t initial_length = 0; size_t striped_length = 0; size_t replace_length = 0; char *input = ReadInput_StripHeader(&initial_length, &striped_length); char match_result[1024]; Count_Patterns(input, striped_length, match_result); Replace_Patterns(input, striped_length, &replace_length); printf("%s\n%d\n%d\n%d\n", match_result, initial_length, striped_length, replace_length); free(input); return 0; }
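The heart of all three variants is the pcre_compile/pcre_study/pcre_exec loop that walks the subject by restarting at match[1]. A minimal counting loop against a toy subject, assuming classic libpcre (link with -lpcre); note that %zu, not %d, is the portable conversion for size_t:

#include <pcre.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *subject = "agggtaaacccagggtaaa";
    const char *err;
    int erroff;
    pcre *re = pcre_compile("agggtaaa|tttaccct", 0, &err, &erroff, 0);
    pcre_extra *extra = pcre_study(re, 0, &err);
    int match[3];
    int pos;
    size_t count = 0;

    for (pos = 0;
         pcre_exec(re, extra, subject, (int)strlen(subject), pos, 0,
                   match, 3) >= 0;
         pos = match[1])
        ++count;                    /* match[1] = offset just past this match */
    printf("%zu\n", count);         /* prints 2 */
    pcre_free(extra);
    pcre_free(re);
    return 0;
}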
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Based on C contribution of Mike Pall // Contributed by The Anh Tran /* * http://benchmarksgame.alioth.debian.org/u64q/program.php?test=regexdna&lang=gcc&id=4 * * usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp * regexdna.gcc-4.c -o regexdna.gcc-4.gcc_run -lpcre * * ./regexdna.gcc-4.gcc_run 0 < regexdna-input5000000.txt */ #define _GNU_SOURCE #include <omp.h> #include <sched.h> #include <pcre.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <memory.h> // read all redirected data from stdin // strip DNA headers and newline characters char * ReadInput_StripHeader(size_t * file_size, size_t * strip_size) { //get input size * file_size = ftell(stdin); fseek(stdin, 0, SEEK_END); *file_size = ftell(stdin) - *file_size; fseek(stdin, 0, SEEK_SET); *strip_size = 0; //load original content into memory char *input = (char *)malloc(*file_size + 1); assert(input != 0); { size_t sz = fread(input, 1, *file_size, stdin); assert(sz == *file_size); input[*file_size] = 0; } //alloc space for regex_replace char *output = (char *)malloc(*file_size); assert(output != 0); const char *re_error; int re_erroff; //compile pattern pcre * re = pcre_compile(">.*\\n|\\n", 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; //regex_replace for (position = 0; pcre_exec(re, re_extra, input, *file_size, position, 0, match, 3) >= 0; position = match[1]) { int char_to_copy = match[0] - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; } //copy remain part int char_to_copy = *file_size - position; memcpy(output + (*strip_size), input + position, char_to_copy); *strip_size += char_to_copy; free(input); pcre_free(re_extra); pcre_free(re); return output; } void Count_Patterns(char const *input, size_t input_len, char *result) { static char const *ptns[] = { "agggtaaa|tttaccct", "[cgt]gggtaaa|tttaccc[acg]", "a[act]ggtaaa|tttacc[agt]t", "ag[act]gtaaa|tttac[agt]ct", "agg[act]taaa|ttta[agt]cct", "aggg[acg]aaa|ttt[cgt]ccct", "agggt[cgt]aa|tt[acg]accct", "agggta[cgt]a|t[acg]taccct", "agggtaa[cgt]|[acg]ttaccct" }; static const int n_ptns = sizeof(ptns) / sizeof(ptns[0]); static size_t counters[9]; int i; #pragma omp for schedule(dynamic, 1) nowait for (i = 0; i < n_ptns; ++i) { const char *re_error = 0; int re_erroff = 0; pcre *re = pcre_compile(ptns[i], 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position, count; int match[3]; //regex_search for (position = count = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1]) ++count; counters[i] = count; pcre_free(re_extra); pcre_free(re); } //we want the last thread, reaching this code block, to print result static size_t thread_passed = 0; if (__sync_add_and_fetch(&thread_passed, 1) == (size_t) omp_get_num_threads()) { int plen = 0; int i; for (i = 0; i < n_ptns; ++i) plen += sprintf(result + plen, "%s %d\n", ptns[i], counters[i]); thread_passed = 0; } } typedef struct IUB_T { const char *iub; int len; } IUB; IUB const iub_table[] = { {0}, {"(c|g|t)", 7}, {0}, {"(a|g|t)", 7}, {0}, {0}, {0}, {"(a|c|t)", 7}, {0}, {0}, {"(g|t)", 5}, {0}, {"(a|c)", 5}, {"(a|c|g|t)", 9}, {0}, {0}, {0}, {"(a|g)", 5}, {"(c|t)", 5}, {0}, {0}, {"(a|c|g)", 7}, {"(a|t)", 5}, {0}, {"(c|t)", 5} }; int const n_iub = sizeof(iub_table) / sizeof(iub_table[0]);
void Replace_Patterns(char const *input, size_t input_len, size_t * repl_len) { #pragma omp single nowait { //input_len * 1.5 char *output = (char *)malloc(input_len + (input_len >> 1)); assert(output != 0); const char *re_error = 0; int re_erroff = 0; pcre *re = pcre_compile("[BDHKMNRSVWY]", 0, &re_error, &re_erroff, 0); pcre_extra *re_extra = pcre_study(re, 0, &re_error); assert(re != 0); int position; int match[3]; int replace_len = 0; //regex_replace for (position = 0; pcre_exec(re, re_extra, input, input_len, position, 0, match, 3) >= 0; position = match[1]) { int char_to_copy = match[0] - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; IUB const *i_r = iub_table + (input[match[0]] - 'A'); char_to_copy = i_r->len; memcpy(output + replace_len, i_r->iub, char_to_copy); replace_len += char_to_copy; } //copy remain part int char_to_copy = input_len - position; memcpy(output + replace_len, input + position, char_to_copy); replace_len += char_to_copy; free(output); pcre_free(re_extra); pcre_free(re); *repl_len = replace_len; } } //Detect single - multi thread benchmark int GetThreadCount() { cpu_set_t cs; int count = 0; int i; CPU_ZERO(&cs); sched_getaffinity(0, sizeof(cs), &cs); for (i = 0; i < CPU_SETSIZE; ++i) { if (CPU_ISSET(i, &cs)) ++count; } return count; } int main() { size_t initial_length = 0; size_t striped_length = 0; size_t replace_length = 0; char *input = ReadInput_StripHeader(&initial_length, &striped_length); char match_result[1024]; #pragma omp parallel default(shared) num_threads(GetThreadCount()) { Count_Patterns(input, striped_length, match_result); Replace_Patterns(input, striped_length, &replace_length); } printf("%s\n%d\n%d\n%d\n", match_result, initial_length, striped_length, replace_length); free(input); return 0; }
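main() overlaps the two jobs inside one parallel region: counting is spread across the team by the `for ... nowait` above, while `#pragma omp single nowait` lets exactly one thread run the replacement concurrently. A minimal sketch of that overlap with stand-in work in place of the real counting and replacement:

#include <omp.h>
#include <stdio.h>

int main(void)
{
    int counts[9] = {0};
    long replaced = 0;
    int i;
    #pragma omp parallel
    {
        #pragma omp for schedule(dynamic, 1) nowait
        for (i = 0; i < 9; ++i)
            counts[i] = i + 1;      /* stand-in for pattern counting */

        #pragma omp single nowait
        replaced = 12345;           /* stand-in for Replace_Patterns() */
    }
    printf("replaced=%ld counts[8]=%d\n", replaced, counts[8]);
    return 0;
}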
hermm_c_dia_n_lo_col_trans.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Complex* Y = &y[index2(cc,0,ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i],Y[i],beta); const ALPHA_Complex* X = &x[index2(cc,0,ldx)]; for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d < 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val,val_c; alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha); alpha_madde(Y[ar],val_c,X[ac]); alpha_madde(Y[ac],val,X[ar]); } } if(d == 0){ for(ALPHA_INT r = 0; r < mat->rows; ++r){ ALPHA_Complex val; alpha_mul_2c(val,mat->values[index2(di,r,mat->lval)],alpha); alpha_madde(Y[r],val,X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA * mat, const ALPHA_Complex * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex * y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Complex *Y = &y[index2(cc, 0, ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i], Y[i], beta); const ALPHA_Complex *X = &x[index2(cc, 0, ldx)]; for (ALPHA_INT di = 0; di < mat->ndiag; ++di) { ALPHA_INT d = mat->distance[di]; if (d < 0) { ALPHA_INT ars = alpha_max(0, -d); ALPHA_INT acs = alpha_max(0, d); ALPHA_INT an = alpha_min(mat->rows - ars, mat->cols - acs); for (ALPHA_INT i = 0; i < an; ++i) { ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val, val_c; alpha_mul(val, mat->values[index2(di, ar, mat->lval)], alpha); alpha_mul_2c(val_c, mat->values[index2(di, ar, mat->lval)], alpha); alpha_madde(Y[ar], val_c, X[ac]); alpha_madde(Y[ac], val, X[ar]); } } if (d == 0) { for (ALPHA_INT r = 0; r < mat->rows; ++r) { ALPHA_Complex val; alpha_mul_2c(val, mat->values[index2(di, r, mat->lval)], alpha); alpha_madde(Y[r], val, X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif #include <memory.h> #include <stdlib.h> alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA * mat, const ALPHA_Complex * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex * y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Complex *Y = &y[index2(cc, 0, ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i], Y[i], beta); const ALPHA_Complex *X = &x[index2(cc, 0, ldx)]; for (ALPHA_INT di = 0; di < mat->ndiag; ++di) { ALPHA_INT d = mat->distance[di]; if (d < 0) { ALPHA_INT ars = alpha_max(0, -d); ALPHA_INT acs = alpha_max(0, d); ALPHA_INT an = alpha_min(mat->rows - ars, mat->cols - acs); for (ALPHA_INT i = 0; i < an; ++i) { ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Complex val, val_c; alpha_mul(val, mat->values[index2(di, ar, mat->lval)], alpha); alpha_mul_2c(val_c, mat->values[index2(di, ar, mat->lval)], alpha); alpha_madde(Y[ar], val_c, X[ac]); alpha_madde(Y[ac], val, X[ar]); } } if (d == 0) { for (ALPHA_INT r = 0; r < mat->rows; ++r) { ALPHA_Complex val; alpha_mul_2c(val, mat->values[index2(di, r, mat->lval)], alpha); alpha_madde(Y[r], val, X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
flip_compute.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <stdint.h> #include <vector> #include "lite/core/kernel.h" namespace paddle { namespace lite { namespace kernels { namespace host { template <typename T> class FlipCompute : public KernelLite<TARGET(kHost), PRECISION(kAny)> { public: using param_t = operators::FcParam; void Run() { auto& param = this->Param<operators::FlipParam>(); auto x = param.X; auto out = param.Out; auto flip_dims = param.axis; auto x_dims = x->dims(); const int total_dims = x_dims.size(); std::vector<bool> dim_bitset(64); for (size_t i = 0; i < flip_dims.size(); ++i) { int dim = flip_dims[i]; if (flip_dims[i] < 0) { dim += total_dims; } dim_bitset[dim] = true; } auto x_strides = x_dims.Vectorize(); auto numel = x->numel(); const T* x_data = x->template data<T>(); T* out_data = out->template mutable_data<T>(); #pragma omp parallel for for (int64_t i = 0; i < numel; ++i) { int64_t cur_indices = i; int64_t rem = 0; int64_t dst_offset = 0; for (int d = 0; d < total_dims; ++d) { int64_t temp = cur_indices; cur_indices = cur_indices / x_strides[d]; rem = temp - cur_indices * x_strides[d]; dst_offset += dim_bitset[d] ? (x_dims[d] - 1 - cur_indices) * x_strides[d] : cur_indices * x_strides[d]; cur_indices = rem; } out_data[i] = x_data[dst_offset]; } } ~FlipCompute() = default; }; } // namespace host } // namespace kernels } // namespace lite } // namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <stdint.h> #include <vector> #include "lite/core/kernel.h" namespace paddle { namespace lite { namespace kernels { namespace host { template < typename T > class FlipCompute:public KernelLite < TARGET(kHost), PRECISION(kAny) > { public: using param_t = operators::FcParam; void Run() { auto & param = this->Param < operators::FlipParam > (); auto x = param.X; auto out = param.Out; auto flip_dims = param.axis; auto x_dims = x->dims(); const int total_dims = x_dims.size(); std::vector < bool > dim_bitset(64); for (size_t i = 0; i < flip_dims.size(); ++i) { int dim = flip_dims[i]; if (flip_dims[i] < 0) { dim += total_dims; } dim_bitset[dim] = true; } auto x_strides = x_dims.Vectorize(); auto numel = x->numel(); const T *x_data = x->template data < T > (); T *out_data = out->template mutable_data < T > (); for (int64_t i = 0; i < numel; ++i) { int64_t cur_indices = i; int64_t rem = 0; int64_t dst_offset = 0; for (int d = 0; d < total_dims; ++d) { int64_t temp = cur_indices; cur_indices = cur_indices / x_strides[d]; rem = temp - cur_indices * x_strides[d]; dst_offset += dim_bitset[d] ? (x_dims[d] - 1 - cur_indices) * x_strides[d] : cur_indices * x_strides[d]; cur_indices = rem; } out_data[i] = x_data[dst_offset]; } } ~FlipCompute() = default; }; } //namespace host } //namespace kernels } //namespace lite } //namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <stdint.h> #include <vector> #include "lite/core/kernel.h" namespace paddle { namespace lite { namespace kernels { namespace host { template < typename T > class FlipCompute:public KernelLite < TARGET(kHost), PRECISION(kAny) > { public: using param_t = operators::FcParam; void Run() { auto & param = this->Param < operators::FlipParam > (); auto x = param.X; auto out = param.Out; auto flip_dims = param.axis; auto x_dims = x->dims(); const int total_dims = x_dims.size(); std::vector < bool > dim_bitset(64); for (size_t i = 0; i < flip_dims.size(); ++i) { int dim = flip_dims[i]; if (flip_dims[i] < 0) { dim += total_dims; } dim_bitset[dim] = true; } auto x_strides = x_dims.Vectorize(); auto numel = x->numel(); const T *x_data = x->template data < T > (); T *out_data = out->template mutable_data < T > (); #pragma omp parallel for for (int64_t i = 0; i < numel; ++i) { int64_t cur_indices = i; int64_t rem = 0; int64_t dst_offset = 0; for (int d = 0; d < total_dims; ++d) { int64_t temp = cur_indices; cur_indices = cur_indices / x_strides[d]; rem = temp - cur_indices * x_strides[d]; dst_offset += dim_bitset[d] ? (x_dims[d] - 1 - cur_indices) * x_strides[d] : cur_indices * x_strides[d]; cur_indices = rem; } out_data[i] = x_data[dst_offset]; } } ~FlipCompute() = default; }; } //namespace host } //namespace kernels } //namespace lite } //namespace paddle
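Run() decodes each flat output index into per-axis coordinates by repeated division and mirrors the axes marked in dim_bitset. A self-contained sketch of that index arithmetic, using the usual row-major convention (stride[d] = product of the dims after d) and plain arrays in place of the lite tensor types:

#include <stdio.h>

/* Reverse a row-major tensor along the axes marked in flip[]. */
static void flip_copy(const float *src, float *dst,
                      const int *dims, const int *flip, int ndim)
{
    long strides[8], numel = 1;
    int d;
    for (d = ndim - 1; d >= 0; --d) {   /* row-major strides */
        strides[d] = numel;
        numel *= dims[d];
    }
    for (long i = 0; i < numel; ++i) {
        long rem = i, off = 0;
        for (d = 0; d < ndim; ++d) {
            long idx = rem / strides[d];        /* coordinate on axis d */
            rem -= idx * strides[d];
            off += (flip[d] ? (dims[d] - 1 - idx) : idx) * strides[d];
        }
        dst[i] = src[off];
    }
}

int main(void)
{
    const float a[2 * 3] = { 0, 1, 2, 3, 4, 5 };
    float b[2 * 3];
    int dims[2] = { 2, 3 }, flip[2] = { 0, 1 };  /* flip the last axis */
    flip_copy(a, b, dims, flip, 2);
    for (int i = 0; i < 6; ++i)
        printf("%g ", b[i]);                     /* 2 1 0 5 4 3 */
    printf("\n");
    return 0;
}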
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace); else (void) SetImageColorspace(combine_image,sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; const PixelPacket *magick_restrict p; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == 
CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p))); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,sRGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport Image *SeparateImage(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *separate_image; MagickBooleanType status; /* Initialize separate image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); status=SeparateImageChannel(separate_image,channel); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Separate image channels. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
image_view=DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte=MagickFalse; (void) SetImageColorspace(image,GRAYColorspace); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SeparateImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); if ((channel & RedChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,RedChannel); AppendImageToList(&images,separate_image); } if ((channel & GreenChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,GreenChannel); AppendImageToList(&images,separate_image); } if ((channel & BlueChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlueChannel); AppendImageToList(&images,separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlackChannel); AppendImageToList(&images,separate_image); } if ((channel & AlphaChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,TrueAlphaChannel); AppendImageToList(&images,separate_image); } return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelType alpha_type) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. 
% */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); exception=(&image->exception); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return(status); image->matte=MagickTrue; break; } case AssociateAlphaChannel: { /* Associate alpha. */ status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! 
*/ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return(status); image->matte=MagickFalse; break; } case DisassociateAlphaChannel: { status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image->matte=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha=QuantumScale*GetPixelAlpha(q); gamma=PerceptibleReciprocal(alpha); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void) memset(&pixel,0,sizeof(pixel)); index=0; SetPixelPacket(image,&background,&pixel,&index); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { IndexPacket *magick_restrict indexes; PixelPacket *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
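For orientation, the two entry points documented above are ordinarily driven from MagickCore client code roughly as follows. This is a minimal sketch, not part of channel.c: the file names are hypothetical, and it assumes the ImageMagick 6 MagickCore headers and link flags (e.g. pkg-config MagickCore).

#include <string.h>
#include <magick/MagickCore.h>

int main(int argc, char **argv)
{
  (void) argc;
  MagickCoreGenesis(*argv, MagickFalse);
  ExceptionInfo *exception = AcquireExceptionInfo();
  ImageInfo *image_info = CloneImageInfo((ImageInfo *) NULL);
  (void) strcpy(image_info->filename, "in.png");    /* hypothetical input */
  Image *image = ReadImage(image_info, exception);
  if (image != (Image *) NULL)
    {
      /* force an opaque alpha channel, then reduce the image to the
         gray levels of its red channel */
      (void) SetImageAlphaChannel(image, OpaqueAlphaChannel);
      (void) SeparateImageChannel(image, RedChannel);
      (void) strcpy(image->filename, "out.png");    /* hypothetical output */
      (void) WriteImage(image_info, image);
      image = DestroyImage(image);
    }
  image_info = DestroyImageInfo(image_info);
  exception = DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return 0;
}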
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma - 1.0) <= MagickEpsilon) (void)SetImageColorspace(combine_image, RGBColorspace); else (void)SetImageColorspace(combine_image, sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; const PixelPacket * magick_restrict p; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, CombineImageTag, progress, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if (status == MagickFalse) combine_image = 
DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed = SetImageProgress(image, SeparateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % AssociateAlphaChannel, * CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, * ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, * SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); exception = (&image->exception); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return (status); image->matte = MagickTrue; break; } case AssociateAlphaChannel: { /* * Associate alpha. 
*/ status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; gamma = QuantumScale * GetPixelAlpha(q); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* * Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. 
*/ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return (status); image->matte = MagickFalse; break; } case DisassociateAlphaChannel: { status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image->matte = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha = QuantumScale * GetPixelAlpha(q); gamma = PerceptibleReciprocal(alpha); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* * Flatten image pixels over the background pixels. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void)memset(&pixel, 0, sizeof(pixel)); index = 0; SetPixelPacket(image, &background, &pixel, &index); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ 
case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma - 1.0) <= MagickEpsilon) (void)SetImageColorspace(combine_image, RGBColorspace); else (void)SetImageColorspace(combine_image, sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; const PixelPacket * magick_restrict p; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, CombineImageTag, progress, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if 
(status == MagickFalse) combine_image = DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) (void)SetImageAlphaChannel(image, OpaqueAlphaChannel); /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed = SetImageProgress(image, SeparateImageTag, progress, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); } return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % AssociateAlphaChannel, * CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, * ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, * SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickCoreSignature); exception = (&image->exception); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { if (image->matte == MagickTrue) return (status); image->matte = MagickTrue; break; } case AssociateAlphaChannel: { /* * Associate alpha. 
*/ status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma; gamma = QuantumScale * GetPixelAlpha(q); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* * Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. 
*/ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { if (image->matte == MagickFalse) return (status); image->matte = MagickFalse; break; } case DisassociateAlphaChannel: { status = SetImageStorageClass(image, DirectClass); if (status == MagickFalse) break; image->matte = MagickTrue; image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha = QuantumScale * GetPixelAlpha(q); gamma = PerceptibleReciprocal(alpha); SetPixelRed(q, ClampToQuantum(gamma * GetPixelRed(q))); SetPixelGreen(q, ClampToQuantum(gamma * GetPixelGreen(q))); SetPixelBlue(q, ClampToQuantum(gamma * GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* * Flatten image pixels over the background pixels. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void)memset(&pixel, 0, sizeof(pixel)); index = 0; SetPixelPacket(image, &background, &pixel, &index); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { IndexPacket * magick_restrict indexes; PixelPacket * magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = 
GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
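The cells above repeat one OpenMP idiom many times: a parallel for over image rows in which no thread may break out of the loop (OpenMP forbids it), so a failure sets a shared status flag that later iterations check and skip on, and the progress counter is bumped under #pragma omp atomic. Below is a minimal self-contained sketch of that pattern, assuming a plain 8-bit buffer instead of ImageMagick's cache views.

#include <stdbool.h>
#include <stddef.h>

/* Invert every row of an 8-bit image; returns false if any row failed.
   Mirrors the channel.c structure: a shared status flag instead of
   `break`, and an atomic progress counter. */
static bool process_rows(unsigned char *pixels, size_t rows, size_t columns)
{
  bool status = true;
  size_t progress = 0;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static) shared(progress, status)
#endif
  for (long y = 0; y < (long) rows; y++) {
    if (!status)            /* another row failed: skip, do not break */
      continue;
    unsigned char *q = pixels + (size_t) y * columns;
    if (q == NULL) {        /* stand-in for a row acquisition that can fail */
      status = false;
      continue;
    }
    for (size_t x = 0; x < columns; x++)
      q[x] = (unsigned char) (255 - q[x]);
#if defined(_OPENMP)
#pragma omp atomic
#endif
    progress++;             /* safe concurrent progress reporting */
  }
  (void) progress;
  return status;
}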
Smoother.h
// File : Smoother.h // Created : Sun Oct 29 2017 12:36:21 PM (+0100) // Author : Fabian Wermelinger // Description: Smooth data // Copyright 2017 ETH Zurich. All Rights Reserved. #ifndef SMOOTHER_H_2PBGUG6D #define SMOOTHER_H_2PBGUG6D #include "Cubism/BlockInfo.h" #include "GridOperator.h" #include "Prolongation/MPI_GridTransfer.h" #include <cassert> #include <vector> using namespace cubism; template <typename TGridIn, typename TGridOut, typename TBlockLab> class Smoother : public GridOperator<TGridIn, TGridOut, TBlockLab> { public: Smoother(ArgumentParser &p) : GridOperator<TGridIn, TGridOut, TBlockLab>(p) { } ~Smoother() = default; void operator()(const TGridIn &grid_in, TGridOut &grid_out, const bool verbose) override { // 0.) checks typedef typename TGridIn::BlockType TBlockIn; typedef typename TGridOut::BlockType TBlockOut; assert(TBlockIn::sizeX == TBlockOut::sizeX); assert(TBlockIn::sizeY == TBlockOut::sizeY); assert(TBlockIn::sizeZ == TBlockOut::sizeZ); assert(grid_in.getResidentBlocksPerDimension(0) == grid_out.getResidentBlocksPerDimension(0)); assert(grid_in.getResidentBlocksPerDimension(1) == grid_out.getResidentBlocksPerDimension(1)); assert(grid_in.getResidentBlocksPerDimension(2) == grid_out.getResidentBlocksPerDimension(2)); const size_t smooth_iter = this->m_parser("smooth_iter").asInt(0); // copy over std::vector<BlockInfo> info_in = grid_in.getResidentBlocksInfo(); std::vector<BlockInfo> info_out = grid_out.getResidentBlocksInfo(); assert(info_in.size() == info_out.size()); #pragma omp parallel for for (size_t i = 0; i < info_out.size(); i++) { BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; bout.clear(); // zero data } #pragma omp parallel for for (size_t i = 0; i < info_in.size(); i++) { // src BlockInfo infoin = info_in[i]; TBlockIn &bin = *(TBlockIn *)infoin.ptrBlock; // dst BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; for (int iz = 0; iz < TBlockIn::sizeZ; iz++) for (int iy = 0; iy < TBlockIn::sizeY; iy++) for (int ix = 0; ix < TBlockIn::sizeX; ix++) bout(ix, iy, iz) = bin(ix, iy, iz); } // smooth out grid for (size_t i = 0; i < smooth_iter; ++i) { if (verbose) std::cout << "smoothing grid: iteration " << i + 1 << std::endl; grid_smoother smoother; process<TBlockLab>(smoother, grid_out, 0, 0); } } }; #endif /* SMOOTHER_H_2PBGUG6D */
// File : Smoother.h // Created : Sun Oct 29 2017 12:36:21 PM (+0100) // Author : Fabian Wermelinger // Description: Smooth data // Copyright 2017 ETH Zurich. All Rights Reserved. #ifndef SMOOTHER_H_2PBGUG6D #define SMOOTHER_H_2PBGUG6D #include "Cubism/BlockInfo.h" #include "GridOperator.h" #include "Prolongation/MPI_GridTransfer.h" #include <cassert> #include <vector> using namespace cubism; template <typename TGridIn, typename TGridOut, typename TBlockLab> class Smoother : public GridOperator<TGridIn, TGridOut, TBlockLab> { public: Smoother(ArgumentParser &p) : GridOperator<TGridIn, TGridOut, TBlockLab>(p) { } ~Smoother() = default; void operator()(const TGridIn &grid_in, TGridOut &grid_out, const bool verbose) override { // 0.) checks typedef typename TGridIn::BlockType TBlockIn; typedef typename TGridOut::BlockType TBlockOut; assert(TBlockIn::sizeX == TBlockOut::sizeX); assert(TBlockIn::sizeY == TBlockOut::sizeY); assert(TBlockIn::sizeZ == TBlockOut::sizeZ); assert(grid_in.getResidentBlocksPerDimension(0) == grid_out.getResidentBlocksPerDimension(0)); assert(grid_in.getResidentBlocksPerDimension(1) == grid_out.getResidentBlocksPerDimension(1)); assert(grid_in.getResidentBlocksPerDimension(2) == grid_out.getResidentBlocksPerDimension(2)); const size_t smooth_iter = this->m_parser("smooth_iter").asInt(0); // copy over std::vector<BlockInfo> info_in = grid_in.getResidentBlocksInfo(); std::vector<BlockInfo> info_out = grid_out.getResidentBlocksInfo(); assert(info_in.size() == info_out.size()); for (size_t i = 0; i < info_out.size(); i++) { BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; bout.clear(); // zero data } for (size_t i = 0; i < info_in.size(); i++) { // src BlockInfo infoin = info_in[i]; TBlockIn &bin = *(TBlockIn *)infoin.ptrBlock; // dst BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; for (int iz = 0; iz < TBlockIn::sizeZ; iz++) for (int iy = 0; iy < TBlockIn::sizeY; iy++) for (int ix = 0; ix < TBlockIn::sizeX; ix++) bout(ix, iy, iz) = bin(ix, iy, iz); } // smooth out grid for (size_t i = 0; i < smooth_iter; ++i) { if (verbose) std::cout << "smoothing grid: iteration " << i + 1 << std::endl; grid_smoother smoother; process<TBlockLab>(smoother, grid_out, 0, 0); } } }; #endif /* SMOOTHER_H_2PBGUG6D */
// File : Smoother.h // Created : Sun Oct 29 2017 12:36:21 PM (+0100) // Author : Fabian Wermelinger // Description: Smooth data // Copyright 2017 ETH Zurich. All Rights Reserved. #ifndef SMOOTHER_H_2PBGUG6D #define SMOOTHER_H_2PBGUG6D #include "Cubism/BlockInfo.h" #include "GridOperator.h" #include "Prolongation/MPI_GridTransfer.h" #include <cassert> #include <vector> using namespace cubism; template <typename TGridIn, typename TGridOut, typename TBlockLab> class Smoother : public GridOperator<TGridIn, TGridOut, TBlockLab> { public: Smoother(ArgumentParser &p) : GridOperator<TGridIn, TGridOut, TBlockLab>(p) { } ~Smoother() = default; void operator()(const TGridIn &grid_in, TGridOut &grid_out, const bool verbose) override { // 0.) checks typedef typename TGridIn::BlockType TBlockIn; typedef typename TGridOut::BlockType TBlockOut; assert(TBlockIn::sizeX == TBlockOut::sizeX); assert(TBlockIn::sizeY == TBlockOut::sizeY); assert(TBlockIn::sizeZ == TBlockOut::sizeZ); assert(grid_in.getResidentBlocksPerDimension(0) == grid_out.getResidentBlocksPerDimension(0)); assert(grid_in.getResidentBlocksPerDimension(1) == grid_out.getResidentBlocksPerDimension(1)); assert(grid_in.getResidentBlocksPerDimension(2) == grid_out.getResidentBlocksPerDimension(2)); const size_t smooth_iter = this->m_parser("smooth_iter").asInt(0); // copy over std::vector<BlockInfo> info_in = grid_in.getResidentBlocksInfo(); std::vector<BlockInfo> info_out = grid_out.getResidentBlocksInfo(); assert(info_in.size() == info_out.size()); #pragma omp parallel for for (size_t i = 0; i < info_out.size(); i++) { BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; bout.clear(); // zero data } #pragma omp parallel for for (size_t i = 0; i < info_in.size(); i++) { // src BlockInfo infoin = info_in[i]; TBlockIn &bin = *(TBlockIn *)infoin.ptrBlock; // dst BlockInfo infoout = info_out[i]; TBlockOut &bout = *(TBlockOut *)infoout.ptrBlock; for (int iz = 0; iz < TBlockIn::sizeZ; iz++) for (int iy = 0; iy < TBlockIn::sizeY; iy++) for (int ix = 0; ix < TBlockIn::sizeX; ix++) bout(ix, iy, iz) = bin(ix, iy, iz); } // smooth out grid for (size_t i = 0; i < smooth_iter; ++i) { if (verbose) std::cout << "smoothing grid: iteration " << i + 1 << std::endl; grid_smoother smoother; process<TBlockLab>(smoother, grid_out, 0, 0); } } }; #endif /* SMOOTHER_H_2PBGUG6D */
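Smoother.h above does two things: a trivially parallel block-wise copy from the input grid, then smooth_iter applications of a grid_smoother kernel that lives elsewhere in Cubism. As a rough illustration of what one smoothing iteration amounts to, here is a 1D sketch that assumes a simple 1-2-1 averaging stencil; the real kernel's stencil may differ.

#include <stddef.h>

/* One Jacobi-style smoothing pass with a 1-2-1 stencil; boundary values
   are left untouched. Illustrative only; not the Cubism grid_smoother. */
static void smooth_once(const double *in, double *out, size_t n)
{
  if (n < 2)
    return;
  out[0] = in[0];
  out[n - 1] = in[n - 1];
#if defined(_OPENMP)
#pragma omp parallel for
#endif
  for (long i = 1; i < (long) (n - 1); i++)
    out[i] = 0.25 * (in[i - 1] + 2.0 * in[i] + in[i + 1]);
}

Repeated passes, as the smooth_iter loop performs on the grid, progressively damp high-frequency content.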
ast-dump-openmp-section.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test(void) { #pragma omp sections { #pragma omp section ; } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void (void)' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:9:1> // CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3> // CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20> // CHECK-NEXT: | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict'
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test(void) { ; }

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:9:1>
// CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20>
// CHECK-NEXT: | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict'
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test(void) {
#pragma omp sections
  {
#pragma omp section
    ;
  }
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-section.c:3:1, line:9:1> line:3:6 test 'void (void)'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:9:1>
// CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-CompoundStmt {{.*}} <line:5:3, line:8:3>
// CHECK-NEXT: | `-OMPSectionDirective {{.*}} <line:6:1, col:20>
// CHECK-NEXT: | `-NullStmt {{.*}} <line:7:5>
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-section.c:4:1) *const restrict'
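For reference, a minimal runnable use of the directives this AST test exercises (the printed strings are illustrative only): each "section" inside a "sections" construct is executed exactly once, by some thread of the enclosing team.

#include <stdio.h>

int main(void)
{
    #pragma omp parallel
    {
        #pragma omp sections
        {
            #pragma omp section
            printf("section A\n"); /* runs once, on some thread */
            #pragma omp section
            printf("section B\n"); /* runs once, possibly concurrently with A */
        }
    }
    return 0;
}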
server.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <omp.h>

#include "names.h"
#include "database.h"

/* Create a new socket for the current thread */
int create_soket(int thr)
{
    struct sockaddr_un sock_addr;
    int sockfd;

    sockfd = socket(PF_UNIX, SOCK_DGRAM, 0);
    if (sockfd < 0) {
        perror("Socket creation failed!\n");
        return -1;
    }
    sock_addr.sun_family = AF_UNIX;
    sprintf(sock_addr.sun_path, "%s%d", SERVER_SOCKET_FILE, thr);
    unlink(sock_addr.sun_path);
    if (bind(sockfd, (struct sockaddr *) &sock_addr, sizeof(sock_addr)) < 0) {
        perror("Socket binding failed!\n");
        close(sockfd);
        return -1;
    }
    return sockfd;
}

/* Wrappers for calling database functions */
/* Call DB functions and send answers to the client */
void server_functs(struct mydb_t *db, char c_type, char **arg, int sockfd,
                   struct sockaddr_un *addr, socklen_t addr_len)
{
    switch (c_type) {
    case C_PUT: {
        char res = mydb_put(db, arg[0], arg[1]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        break;
    }
    case C_GET: {
        const char *val = mydb_get(db, arg[0]);
        int count = !val ? 0 : 1;
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *) addr, addr_len);
        if (count) {
            int buf_size = strlen(val) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *) addr, addr_len);
            sendto(sockfd, val, buf_size, 0, (struct sockaddr *) addr, addr_len);
        }
        break;
    }
    case C_LIST: {
        size_t count;
        const char **list = mydb_list(db, &count);
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *) addr, addr_len);
        for (size_t i = 0; i < count; i++) {
            int buf_size = strlen(list[i]) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *) addr, addr_len);
            sendto(sockfd, list[i], buf_size, 0, (struct sockaddr *) addr, addr_len);
        }
        free(list);
        break;
    }
    case C_ERACE: {
        char res = mydb_erase(db, arg[0]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        break;
    }
    case C_EXIT: {
        mydb_close(db);
        char res = 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *) addr, addr_len);
        if (sockfd >= 0)
            close(sockfd);
        exit(EXIT_SUCCESS);
    }
    }
}

int is_read(const char c_type)
{
    if (c_type == C_LIST || c_type == C_GET)
        return 1;
    return 0;
}

int main(int argc, const char *argv[])
{
    /* Database init */
    struct mydb_t *db = mydb_init(FILE_DATA, FILE_MDATA, 1);
    if (!db) {
        printf("DB not created\n");
        return 1;
    }

    omp_lock_t lock;
    omp_init_lock(&lock);
    /* Number of read functions working at the moment */
    int read_f = 0;

    /* Start of the parallel region */
    #pragma omp parallel num_threads(SERVER_NUM_THREADS)
    {
        int thr = omp_get_thread_num();
        /* Server init */
        int sockfd;
        if ((sockfd = create_soket(thr)) < 0) {
            exit(EXIT_FAILURE);
        }
        struct sockaddr_un from_addr;
        socklen_t sockaddr_len = sizeof(struct sockaddr_un);
        char c_type, ibuf[20], *arg[2];
        int arg_len[2];

        for (;;) {
            /* Get info about the incoming request */
            recvfrom(sockfd, ibuf, 20, 0, (struct sockaddr *) &from_addr, &sockaddr_len);
            /* Decode the info */
            c_type = (unsigned char) ibuf[0];
            arg_len[0] = *((int *) &ibuf[1]);
            arg_len[1] = *((int *) &ibuf[1 + sizeof(int)]);
            /* Get request */
            for (int i = 0; i < 2 && arg_len[i] > 0; i++) {
                arg[i] = malloc(arg_len[i]);
                recvfrom(sockfd, arg[i], arg_len[i], 0, (struct sockaddr *) &from_addr, &sockaddr_len);
            }
            /* Request to DB */
            omp_set_lock(&lock);
            if (is_read(c_type)) {
                /* If it's just a read function, release the lock */
                omp_unset_lock(&lock);
                /* Increment the count of working read functions */
                #pragma omp atomic
                read_f++;
            } else {
                /* If it's a write function, wait for all read
                 * functions to complete */
                while (read_f)
                    sleep(10);
            }
            server_functs(db, c_type, arg, sockfd, &from_addr, sockaddr_len);
            if (is_read(c_type)) {
                /* Decrement the count of working read functions */
                #pragma omp atomic
                read_f--;
            } else {
                /* Release the lock */
                omp_unset_lock(&lock);
            }
            for (int i = 0; i < 2 && arg_len[i] > 0; i++)
                free(arg[i]);
        }
    }
    return EXIT_SUCCESS;
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <omp.h>

#include "names.h"
#include "database.h"

/* Create a new socket for the current thread */
int create_soket(int thr)
{
    struct sockaddr_un sock_addr;
    int sockfd;

    sockfd = socket(PF_UNIX, SOCK_DGRAM, 0);
    if (sockfd < 0) {
        perror("Socket creation failed!\n");
        return -1;
    }
    sock_addr.sun_family = AF_UNIX;
    sprintf(sock_addr.sun_path, "%s%d", SERVER_SOCKET_FILE, thr);
    unlink(sock_addr.sun_path);
    if (bind(sockfd, (struct sockaddr *)&sock_addr, sizeof(sock_addr)) < 0) {
        perror("Socket binding failed!\n");
        close(sockfd);
        return -1;
    }
    return sockfd;
}

/* Wrappers for calling database functions */
/* Call DB functions and send answers to the client */
void server_functs(struct mydb_t *db, char c_type, char **arg, int sockfd,
                   struct sockaddr_un *addr, socklen_t addr_len)
{
    switch (c_type) {
    case C_PUT: {
        char res = mydb_put(db, arg[0], arg[1]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        break;
    }
    case C_GET: {
        const char *val = mydb_get(db, arg[0]);
        int count = !val ? 0 : 1;
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
        if (count) {
            int buf_size = strlen(val) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
            sendto(sockfd, val, buf_size, 0, (struct sockaddr *)addr, addr_len);
        }
        break;
    }
    case C_LIST: {
        size_t count;
        const char **list = mydb_list(db, &count);
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
        for (size_t i = 0; i < count; i++) {
            int buf_size = strlen(list[i]) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
            sendto(sockfd, list[i], buf_size, 0, (struct sockaddr *)addr, addr_len);
        }
        free(list);
        break;
    }
    case C_ERACE: {
        char res = mydb_erase(db, arg[0]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        break;
    }
    case C_EXIT: {
        mydb_close(db);
        char res = 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        if (sockfd >= 0)
            close(sockfd);
        exit(EXIT_SUCCESS);
    }
    }
}

int is_read(const char c_type)
{
    if (c_type == C_LIST || c_type == C_GET)
        return 1;
    return 0;
}

int main(int argc, const char *argv[])
{
    /* Database init */
    struct mydb_t *db = mydb_init(FILE_DATA, FILE_MDATA, 1);
    if (!db) {
        printf("DB not created\n");
        return 1;
    }

    omp_lock_t lock;
    omp_init_lock(&lock);
    /* Number of read functions working at the moment */
    int read_f = 0;

    /* Start of the parallel region */
    int thr = omp_get_thread_num();
    /* Server init */
    int sockfd;
    if ((sockfd = create_soket(thr)) < 0) {
        exit(EXIT_FAILURE);
    }
    struct sockaddr_un from_addr;
    socklen_t sockaddr_len = sizeof(struct sockaddr_un);
    char c_type, ibuf[20], *arg[2];
    int arg_len[2];

    for (;;) {
        /* Get info about the incoming request */
        recvfrom(sockfd, ibuf, 20, 0, (struct sockaddr *)&from_addr,
                 &sockaddr_len);
        /* Decode the info */
        c_type = (unsigned char)ibuf[0];
        arg_len[0] = *((int *)&ibuf[1]);
        arg_len[1] = *((int *)&ibuf[1 + sizeof(int)]);
        /* Get request */
        for (int i = 0; i < 2 && arg_len[i] > 0; i++) {
            arg[i] = malloc(arg_len[i]);
            recvfrom(sockfd, arg[i], arg_len[i], 0,
                     (struct sockaddr *)&from_addr, &sockaddr_len);
        }
        /* Request to DB */
        omp_set_lock(&lock);
        if (is_read(c_type)) {
            /* If it's just a read function, release the lock */
            omp_unset_lock(&lock);
            /* Increment the count of working read functions */
            read_f++;
        } else {
            /*
             * If it's a write function, wait for all read functions to
             * complete
             */
            while (read_f)
                sleep(10);
        }
        server_functs(db, c_type, arg, sockfd, &from_addr, sockaddr_len);
        if (is_read(c_type)) {
            /* Decrement the count of working read functions */
            read_f--;
        } else {
            /* Release the lock */
            omp_unset_lock(&lock);
        }
        for (int i = 0; i < 2 && arg_len[i] > 0; i++)
            free(arg[i]);
    }
    return EXIT_SUCCESS;
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <omp.h>

#include "names.h"
#include "database.h"

/* Create a new socket for the current thread */
int create_soket(int thr)
{
    struct sockaddr_un sock_addr;
    int sockfd;

    sockfd = socket(PF_UNIX, SOCK_DGRAM, 0);
    if (sockfd < 0) {
        perror("Socket creation failed!\n");
        return -1;
    }
    sock_addr.sun_family = AF_UNIX;
    sprintf(sock_addr.sun_path, "%s%d", SERVER_SOCKET_FILE, thr);
    unlink(sock_addr.sun_path);
    if (bind(sockfd, (struct sockaddr *)&sock_addr, sizeof(sock_addr)) < 0) {
        perror("Socket binding failed!\n");
        close(sockfd);
        return -1;
    }
    return sockfd;
}

/* Wrappers for calling database functions */
/* Call DB functions and send answers to the client */
void server_functs(struct mydb_t *db, char c_type, char **arg, int sockfd,
                   struct sockaddr_un *addr, socklen_t addr_len)
{
    switch (c_type) {
    case C_PUT: {
        char res = mydb_put(db, arg[0], arg[1]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        break;
    }
    case C_GET: {
        const char *val = mydb_get(db, arg[0]);
        int count = !val ? 0 : 1;
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
        if (count) {
            int buf_size = strlen(val) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
            sendto(sockfd, val, buf_size, 0, (struct sockaddr *)addr, addr_len);
        }
        break;
    }
    case C_LIST: {
        size_t count;
        const char **list = mydb_list(db, &count);
        sendto(sockfd, &count, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
        for (size_t i = 0; i < count; i++) {
            int buf_size = strlen(list[i]) + 1;
            sendto(sockfd, &buf_size, sizeof(int), 0, (struct sockaddr *)addr, addr_len);
            sendto(sockfd, list[i], buf_size, 0, (struct sockaddr *)addr, addr_len);
        }
        free(list);
        break;
    }
    case C_ERACE: {
        char res = mydb_erase(db, arg[0]) ? 0 : 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        break;
    }
    case C_EXIT: {
        mydb_close(db);
        char res = 1;
        sendto(sockfd, &res, 1, 0, (struct sockaddr *)addr, addr_len);
        if (sockfd >= 0)
            close(sockfd);
        exit(EXIT_SUCCESS);
    }
    }
}

int is_read(const char c_type)
{
    if (c_type == C_LIST || c_type == C_GET)
        return 1;
    return 0;
}

int main(int argc, const char *argv[])
{
    /* Database init */
    struct mydb_t *db = mydb_init(FILE_DATA, FILE_MDATA, 1);
    if (!db) {
        printf("DB not created\n");
        return 1;
    }

    omp_lock_t lock;
    omp_init_lock(&lock);
    /* Number of read functions working at the moment */
    int read_f = 0;

    /* Start of the parallel region */
    #pragma omp parallel num_threads(SERVER_NUM_THREADS)
    {
        int thr = omp_get_thread_num();
        /* Server init */
        int sockfd;
        if ((sockfd = create_soket(thr)) < 0) {
            exit(EXIT_FAILURE);
        }
        struct sockaddr_un from_addr;
        socklen_t sockaddr_len = sizeof(struct sockaddr_un);
        char c_type, ibuf[20], *arg[2];
        int arg_len[2];

        for (;;) {
            /* Get info about the incoming request */
            recvfrom(sockfd, ibuf, 20, 0, (struct sockaddr *)&from_addr,
                     &sockaddr_len);
            /* Decode the info */
            c_type = (unsigned char)ibuf[0];
            arg_len[0] = *((int *)&ibuf[1]);
            arg_len[1] = *((int *)&ibuf[1 + sizeof(int)]);
            /* Get request */
            for (int i = 0; i < 2 && arg_len[i] > 0; i++) {
                arg[i] = malloc(arg_len[i]);
                recvfrom(sockfd, arg[i], arg_len[i], 0,
                         (struct sockaddr *)&from_addr, &sockaddr_len);
            }
            /* Request to DB */
            omp_set_lock(&lock);
            if (is_read(c_type)) {
                /* If it's just a read function, release the lock */
                omp_unset_lock(&lock);
                /* Increment the count of working read functions */
                #pragma omp atomic
                read_f++;
            } else {
                /*
                 * If it's a write function, wait for all read functions to
                 * complete
                 */
                while (read_f)
                    sleep(10);
            }
            server_functs(db, c_type, arg, sockfd, &from_addr, sockaddr_len);
            if (is_read(c_type)) {
                /* Decrement the count of working read functions */
                #pragma omp atomic
                read_f--;
            } else {
                /* Release the lock */
                omp_unset_lock(&lock);
            }
            for (int i = 0; i < 2 && arg_len[i] > 0; i++)
                free(arg[i]);
        }
    }
    return EXIT_SUCCESS;
}
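The server above gates writers with an OpenMP lock and tracks in-flight readers with an atomically updated counter. Here is a compact, self-contained sketch of that idiom; the thread roles, placeholder work functions, and the one-second poll interval are assumptions for illustration, not part of the server:

#include <omp.h>
#include <stdio.h>
#include <unistd.h>

static omp_lock_t lock;
static int read_f; /* number of in-flight readers */

static void do_read(int tid)  { printf("reader %d\n", tid); }
static void do_write(int tid) { printf("writer %d\n", tid); }

int main(void)
{
    omp_init_lock(&lock);
    #pragma omp parallel num_threads(4)
    {
        int tid = omp_get_thread_num();
        if (tid != 0) { /* readers */
            omp_set_lock(&lock);   /* serialize registration against writers */
            omp_unset_lock(&lock);
            #pragma omp atomic
            read_f++;              /* register as an in-flight reader */
            do_read(tid);
            #pragma omp atomic
            read_f--;
        } else { /* writer */
            omp_set_lock(&lock);   /* block new readers from registering */
            while (read_f)         /* wait for in-flight readers to drain */
                sleep(1);
            do_write(tid);
            omp_unset_lock(&lock);
        }
    }
    omp_destroy_lock(&lock);
    return 0;
}

As in the server, there is a window between a reader releasing the lock and incrementing read_f in which a writer may observe read_f == 0, and the drain loop reads read_f without an atomic read; the sketch reproduces those properties of the original rather than hiding them.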
naugraph.c
/***************************************************************************** * * * Graph-specific auxiliary source file for version 2.2 of nauty. * * * * Copyright (1984-2002) Brendan McKay. All rights reserved. * * Subject to waivers and disclaimers in nauty.h. * * * * CHANGE HISTORY * * 16-Nov-00 : initial creation out of nautil.c * * 22-Apr-01 : added aproto line for Magma * * EXTDEFS is no longer required * * removed dynamic allocation from refine1() * * 21-Nov-01 : use NAUTYREQUIRED in naugraph_check() * * * *****************************************************************************/ #define ONE_WORD_SETS #include "nauty.h" /* macros for hash-codes: */ #define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777) /* : expression whose long value depends only on long l and int/long i. Anything goes, preferably non-commutative. */ #define CLEANUP(l) ((int)((l) % 077777)) /* : expression whose value depends on long l and is less than 077777 when converted to int then short. Anything goes. */ #if MAXM==1 #define M 1 #else #define M m #endif /* aproto: header new_nauty_protos.h */ dispatchvec dispatch_graph = {isautom,testcanlab,updatecan,refine,refine1,cheapautom,bestcell, naugraph_freedyn,naugraph_check,NULL,NULL}; #if !MAXN DYNALLSTAT(set,workset,workset_sz); DYNALLSTAT(permutation,workperm,workperm_sz); DYNALLSTAT(int,bucket,bucket_sz); #else static set workset[MAXM]; /* used for scratch work */ static permutation workperm[MAXN]; static int bucket[MAXN+2]; #endif /***************************************************************************** * * * isautom(g,perm,digraph,m,n) = TRUE iff perm is an automorphism of g * * (i.e., g^perm = g). Symmetry is assumed unless digraph = TRUE. * * * *****************************************************************************/ boolean isautom(graph *g, permutation *perm, boolean digraph, int m, int n) { boolean autom=TRUE; #ifdef _OPENMP #pragma omp parallel #endif { int stride=1, offs=0; register set *pg; register int pos; set *pgp; int posp,i; #ifdef _OPENMP offs=omp_get_thread_num(); stride=omp_get_num_threads(); #endif for (i = offs; autom && i < n; i+=stride) { pg=g+M*i; pgp = GRAPHROW(g,perm[i],M); pos = (digraph ? -1 : i); while ((pos = nextelement(pg,M,pos)) >= 0) { posp = perm[pos]; if (!ISELEMENT(pgp,posp)) autom=FALSE; } } } return autom; } /***************************************************************************** * * * testcanlab(g,canong,lab,samerows,m,n) compares g^lab to canong, * * using an ordering which is immaterial since it's only used here. The * * value returned is -1,0,1 if g^lab <,=,> canong. *samerows is set to * * the number of rows (0..n) of canong which are the same as those of g^lab. 
* * * * GLOBALS ACCESSED: workset<rw>,permset(),workperm<rw> * * * *****************************************************************************/ int testcanlab(graph *g, graph *canong, int *lab, int *samerows, int m, int n) { register int i,j; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"testcanlab"); DYNALLOC1(set,workset,workset_sz,m,"testcanlab"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = 0, ph = canong; i < n; ++i, ph += M) { permset(GRAPHROW(g,lab[i],M),workset,M,workperm); for (j = 0; j < M; ++j) if (workset[j] < ph[j]) { *samerows = i; return -1; } else if (workset[j] > ph[j]) { *samerows = i; return 1; } } *samerows = n; return 0; } /***************************************************************************** * * * updatecan(g,canong,lab,samerows,m,n) sets canong = g^lab, assuming * * the first samerows of canong are ok already. * * * * GLOBALS ACCESSED: permset(),workperm<rw> * * * *****************************************************************************/ void updatecan(graph *g, graph *canong, permutation *lab, int samerows, int m, int n) { register int i; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"updatecan"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = samerows, ph = GRAPHROW(canong,samerows,M); i < n; ++i, ph += M) permset(GRAPHROW(g,lab[i],M),ph,M,workperm); } /***************************************************************************** * * * refine(g,lab,ptn,level,numcells,count,active,code,m,n) performs a * * refinement operation on the partition at the specified level of the * * partition nest (lab,ptn). *numcells is assumed to contain the number of * * cells on input, and is updated. The initial set of active cells (alpha * * in the paper) is specified in the set active. Precisely, x is in active * * iff the cell starting at index x in lab is active. * * The resulting partition is equitable if active is correct (see the paper * * and the Guide). * * *code is set to a value which depends on the fine detail of the * * algorithm, but which is independent of the labelling of the graph. * * count is used for work space. 
* * * * GLOBALS ACCESSED: workset<w>,bit<r>,nextelement(),bucket<w>,workperm<w> * * * *****************************************************************************/ void refine(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { #if MAXM==1 refine1(g,lab,ptn,level,numcells,count,active,code,m,n); } #else register int i,c1,c2,labc1; register setword x; register set *set1,*set2; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT(active,split1)) || (split1 = nextelement(active,M,split1)) >= 0 || (split1 = nextelement(active,M,-1)) >= 0)) { DELELEMENT(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],M); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { EMPTYSET(workset,m); for (i = split1; i <= split2; ++i) ADDELEMENT(workset,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & (*set2++)) != 0) cnt += POPCOUNT(x); count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & (*set2++)) != 0) cnt += POPCOUNT(x); while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // just to shut up gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT(active,cell1)) { ADDELEMENT(active,cell1); DELELEMENT(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } #endif /* else case of MAXM==1 */ /***************************************************************************** * * * refine1(g,lab,ptn,level,numcells,count,active,code,m,n) is the same as * * refine(g,lab,ptn,level,numcells,count,active,code,m,n), except that * * m==1 is assumed for greater efficiency. The results are identical in all * * respects. See refine (above) for the specs. 
* * * *****************************************************************************/ void refine1(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { register int i,c1,c2,labc1; register setword x; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr,workset0; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine1"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine1"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT1(active,split1)) || (split1 = nextelement(active,1,split1)) >= 0 || (split1 = nextelement(active,1,-1)) >= 0)) { DELELEMENT1(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT1(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT1(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT1(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT1(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { workset0 = 0; for (i = split1; i <= split2; ++i) ADDELEMENT1(&workset0,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // only needed to silence gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT1(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT1(active,cell1)) { ADDELEMENT1(active,cell1); DELELEMENT1(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } /***************************************************************************** * * * cheapautom(ptn,level,digraph,n) returns TRUE if the partition at the * * specified level in the partition nest (lab,ptn) {lab is not needed here} * * satisfies a simple sufficient condition for its cells to be the orbits of * * some subgroup of the automorphism group. Otherwise it returns FALSE. * * It always returns FALSE if digraph!=FALSE. * * * * nauty assumes that this function will always return TRUE for any * * partition finer than one for which it returns TRUE. 
* * * *****************************************************************************/ boolean cheapautom(int *ptn, int level, boolean digraph, int n) { register int i,k,nnt; if (digraph) return FALSE; k = n; nnt = 0; for (i = 0; i < n; ++i) { --k; if (ptn[i] > level) { ++nnt; while (ptn[++i] > level) {} } } return (k <= nnt + 1 || k <= 4); } /***************************************************************************** * * * bestcell(g,lab,ptn,level,tc_level,m,n) returns the index in lab of the * * start of the "best non-singleton cell" for fixing. If there is no * * non-singleton cell it returns n. * * This implementation finds the first cell which is non-trivially joined * * to the greatest number of other cells. * * * * GLOBALS ACCESSED: bit<r>,workperm<rw>,workset<rw>,bucket<rw> * * * *****************************************************************************/ int bestcell(graph *g, int *lab, int *ptn, int level, int tc_level, int m, int n) { register int i; set *gp; register setword setword1,setword2; int v1,v2,nnt; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif /* find non-singleton cells: put starts in workperm[0..nnt-1] */ i = nnt = 0; while (i < n) { if (ptn[i] > level) { workperm[nnt++] = i; while (ptn[i] > level) ++i; } ++i; } if (nnt == 0) return n; /* set bucket[i] to # non-trivial neighbours of n.s. cell i */ for (i = nnt; --i >= 0;) bucket[i] = 0; for (v2 = 1; v2 < nnt; ++v2) { EMPTYSET(workset,m); i = workperm[v2] - 1; do { ++i; ADDELEMENT(workset,lab[i]); } while (ptn[i] > level); for (v1 = 0; v1 < v2; ++v1) { gp = GRAPHROW(g,lab[workperm[v1]],m); #if MAXM==1 setword1 = *workset & *gp; setword2 = *workset & ~*gp; #else setword1 = setword2 = 0; for (i = m; --i >= 0;) { setword1 |= workset[i] & gp[i]; setword2 |= workset[i] & ~gp[i]; } #endif if (setword1 != 0 && setword2 != 0) { ++bucket[v1]; ++bucket[v2]; } } } /* find first greatest bucket value */ v1 = 0; v2 = bucket[0]; for (i = 1; i < nnt; ++i) if (bucket[i] > v2) { v1 = i; v2 = bucket[i]; } return (int)workperm[v1]; } /***************************************************************************** * * * naugraph_check() checks that this file is compiled compatibly with the * * given parameters. If not, call exit(1). * * * *****************************************************************************/ void naugraph_check(int wordsize, int m, int n, int version) { if (wordsize != WORDSIZE) { fprintf(ERRFILE,"Error: WORDSIZE mismatch in naugraph.c\n"); exit(1); } #if MAXN if (m > MAXM) { fprintf(ERRFILE,"Error: MAXM inadequate in naugraph.c\n"); exit(1); } if (n > MAXN) { fprintf(ERRFILE,"Error: MAXN inadequate in naugraph.c\n"); exit(1); } #endif #ifdef BIGNAUTY if ((version & 1) == 0) { fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n"); exit(1); } #else if ((version & 1) == 1) { fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n"); exit(1); } #endif if (version < NAUTYREQUIRED) { fprintf(ERRFILE,"Error: naugraph.c version mismatch\n"); exit(1); } } /***************************************************************************** * * * naugraph_freedyn() - free the dynamic memory in this module * * * *****************************************************************************/ void naugraph_freedyn(void) { #if !MAXN DYNFREE(workset,workset_sz); DYNFREE(workperm,workperm_sz); DYNFREE(bucket,bucket_sz); #endif }
#define ONE_WORD_SETS #include "nauty.h" /* macros for hash-codes: */ #define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777) /* : expression whose long value depends only on long l and int/long i. Anything goes, preferably non-commutative. */ #define CLEANUP(l) ((int)((l) % 077777)) /* : expression whose value depends on long l and is less than 077777 when converted to int then short. Anything goes. */ #if MAXM==1 #define M 1 #else #define M m #endif /* aproto: header new_nauty_protos.h */ dispatchvec dispatch_graph = {isautom,testcanlab,updatecan,refine,refine1,cheapautom,bestcell, naugraph_freedyn,naugraph_check,NULL,NULL}; #if !MAXN DYNALLSTAT(set,workset,workset_sz); DYNALLSTAT(permutation,workperm,workperm_sz); DYNALLSTAT(int,bucket,bucket_sz); #else static set workset[MAXM]; /* used for scratch work */ static permutation workperm[MAXN]; static int bucket[MAXN+2]; #endif /***************************************************************************** * * * isautom(g,perm,digraph,m,n) = TRUE iff perm is an automorphism of g * * (i.e., g^perm = g). Symmetry is assumed unless digraph = TRUE. * * * *****************************************************************************/ boolean isautom(graph *g, permutation *perm, boolean digraph, int m, int n) { boolean autom=TRUE; { int stride=1, offs=0; register set *pg; register int pos; set *pgp; int posp,i; for (i = offs; autom && i < n; i+=stride) { pg=g+M*i; pgp = GRAPHROW(g,perm[i],M); pos = (digraph ? -1 : i); while ((pos = nextelement(pg,M,pos)) >= 0) { posp = perm[pos]; if (!ISELEMENT(pgp,posp)) autom=FALSE; } } } return autom; } /***************************************************************************** * * * testcanlab(g,canong,lab,samerows,m,n) compares g^lab to canong, * * using an ordering which is immaterial since it's only used here. The * * value returned is -1,0,1 if g^lab <,=,> canong. *samerows is set to * * the number of rows (0..n) of canong which are the same as those of g^lab. * * * * GLOBALS ACCESSED: workset<rw>,permset(),workperm<rw> * * * *****************************************************************************/ int testcanlab(graph *g, graph *canong, int *lab, int *samerows, int m, int n) { register int i,j; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"testcanlab"); DYNALLOC1(set,workset,workset_sz,m,"testcanlab"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = 0, ph = canong; i < n; ++i, ph += M) { permset(GRAPHROW(g,lab[i],M),workset,M,workperm); for (j = 0; j < M; ++j) if (workset[j] < ph[j]) { *samerows = i; return -1; } else if (workset[j] > ph[j]) { *samerows = i; return 1; } } *samerows = n; return 0; } /***************************************************************************** * * * updatecan(g,canong,lab,samerows,m,n) sets canong = g^lab, assuming * * the first samerows of canong are ok already. 
* * * * GLOBALS ACCESSED: permset(),workperm<rw> * * * *****************************************************************************/ void updatecan(graph *g, graph *canong, permutation *lab, int samerows, int m, int n) { register int i; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"updatecan"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = samerows, ph = GRAPHROW(canong,samerows,M); i < n; ++i, ph += M) permset(GRAPHROW(g,lab[i],M),ph,M,workperm); } /***************************************************************************** * * * refine(g,lab,ptn,level,numcells,count,active,code,m,n) performs a * * refinement operation on the partition at the specified level of the * * partition nest (lab,ptn). *numcells is assumed to contain the number of * * cells on input, and is updated. The initial set of active cells (alpha * * in the paper) is specified in the set active. Precisely, x is in active * * iff the cell starting at index x in lab is active. * * The resulting partition is equitable if active is correct (see the paper * * and the Guide). * * *code is set to a value which depends on the fine detail of the * * algorithm, but which is independent of the labelling of the graph. * * count is used for work space. * * * * GLOBALS ACCESSED: workset<w>,bit<r>,nextelement(),bucket<w>,workperm<w> * * * *****************************************************************************/ void refine(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { #if MAXM==1 refine1(g,lab,ptn,level,numcells,count,active,code,m,n); } #else register int i,c1,c2,labc1; register setword x; register set *set1,*set2; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT(active,split1)) || (split1 = nextelement(active,M,split1)) >= 0 || (split1 = nextelement(active,M,-1)) >= 0)) { DELELEMENT(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],M); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { EMPTYSET(workset,m); for (i = split1; i <= split2; ++i) ADDELEMENT(workset,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & (*set2++)) != 0) cnt += POPCOUNT(x); count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & 
(*set2++)) != 0) cnt += POPCOUNT(x); while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // just to shut up gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT(active,cell1)) { ADDELEMENT(active,cell1); DELELEMENT(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } #endif /* else case of MAXM==1 */ /***************************************************************************** * * * refine1(g,lab,ptn,level,numcells,count,active,code,m,n) is the same as * * refine(g,lab,ptn,level,numcells,count,active,code,m,n), except that * * m==1 is assumed for greater efficiency. The results are identical in all * * respects. See refine (above) for the specs. * * * *****************************************************************************/ void refine1(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { register int i,c1,c2,labc1; register setword x; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr,workset0; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine1"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine1"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT1(active,split1)) || (split1 = nextelement(active,1,split1)) >= 0 || (split1 = nextelement(active,1,-1)) >= 0)) { DELELEMENT1(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT1(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT1(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT1(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT1(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { workset0 = 0; for (i = split1; i <= split2; ++i) ADDELEMENT1(&workset0,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // only needed to silence gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; 
bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT1(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT1(active,cell1)) { ADDELEMENT1(active,cell1); DELELEMENT1(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } /***************************************************************************** * * * cheapautom(ptn,level,digraph,n) returns TRUE if the partition at the * * specified level in the partition nest (lab,ptn) {lab is not needed here} * * satisfies a simple sufficient condition for its cells to be the orbits of * * some subgroup of the automorphism group. Otherwise it returns FALSE. * * It always returns FALSE if digraph!=FALSE. * * * * nauty assumes that this function will always return TRUE for any * * partition finer than one for which it returns TRUE. * * * *****************************************************************************/ boolean cheapautom(int *ptn, int level, boolean digraph, int n) { register int i,k,nnt; if (digraph) return FALSE; k = n; nnt = 0; for (i = 0; i < n; ++i) { --k; if (ptn[i] > level) { ++nnt; while (ptn[++i] > level) {} } } return (k <= nnt + 1 || k <= 4); } /***************************************************************************** * * * bestcell(g,lab,ptn,level,tc_level,m,n) returns the index in lab of the * * start of the "best non-singleton cell" for fixing. If there is no * * non-singleton cell it returns n. * * This implementation finds the first cell which is non-trivially joined * * to the greatest number of other cells. * * * * GLOBALS ACCESSED: bit<r>,workperm<rw>,workset<rw>,bucket<rw> * * * *****************************************************************************/ int bestcell(graph *g, int *lab, int *ptn, int level, int tc_level, int m, int n) { register int i; set *gp; register setword setword1,setword2; int v1,v2,nnt; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif /* find non-singleton cells: put starts in workperm[0..nnt-1] */ i = nnt = 0; while (i < n) { if (ptn[i] > level) { workperm[nnt++] = i; while (ptn[i] > level) ++i; } ++i; } if (nnt == 0) return n; /* set bucket[i] to # non-trivial neighbours of n.s. cell i */ for (i = nnt; --i >= 0;) bucket[i] = 0; for (v2 = 1; v2 < nnt; ++v2) { EMPTYSET(workset,m); i = workperm[v2] - 1; do { ++i; ADDELEMENT(workset,lab[i]); } while (ptn[i] > level); for (v1 = 0; v1 < v2; ++v1) { gp = GRAPHROW(g,lab[workperm[v1]],m); #if MAXM==1 setword1 = *workset & *gp; setword2 = *workset & ~*gp; #else setword1 = setword2 = 0; for (i = m; --i >= 0;) { setword1 |= workset[i] & gp[i]; setword2 |= workset[i] & ~gp[i]; } #endif if (setword1 != 0 && setword2 != 0) { ++bucket[v1]; ++bucket[v2]; } } } /* find first greatest bucket value */ v1 = 0; v2 = bucket[0]; for (i = 1; i < nnt; ++i) if (bucket[i] > v2) { v1 = i; v2 = bucket[i]; } return (int)workperm[v1]; } /***************************************************************************** * * * naugraph_check() checks that this file is compiled compatibly with the * * given parameters. If not, call exit(1). 
* * * *****************************************************************************/ void naugraph_check(int wordsize, int m, int n, int version) { if (wordsize != WORDSIZE) { fprintf(ERRFILE,"Error: WORDSIZE mismatch in naugraph.c\n"); exit(1); } #if MAXN if (m > MAXM) { fprintf(ERRFILE,"Error: MAXM inadequate in naugraph.c\n"); exit(1); } if (n > MAXN) { fprintf(ERRFILE,"Error: MAXN inadequate in naugraph.c\n"); exit(1); } #endif #ifdef BIGNAUTY if ((version & 1) == 0) { fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n"); exit(1); } #else if ((version & 1) == 1) { fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n"); exit(1); } #endif if (version < NAUTYREQUIRED) { fprintf(ERRFILE,"Error: naugraph.c version mismatch\n"); exit(1); } } /***************************************************************************** * * * naugraph_freedyn() - free the dynamic memory in this module * * * *****************************************************************************/ void naugraph_freedyn(void) { #if !MAXN DYNFREE(workset,workset_sz); DYNFREE(workperm,workperm_sz); DYNFREE(bucket,bucket_sz); #endif }
#define ONE_WORD_SETS #include "nauty.h" /* macros for hash-codes: */ #define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777) /* : expression whose long value depends only on long l and int/long i. Anything goes, preferably non-commutative. */ #define CLEANUP(l) ((int)((l) % 077777)) /* : expression whose value depends on long l and is less than 077777 when converted to int then short. Anything goes. */ #if MAXM==1 #define M 1 #else #define M m #endif /* aproto: header new_nauty_protos.h */ dispatchvec dispatch_graph = {isautom,testcanlab,updatecan,refine,refine1,cheapautom,bestcell, naugraph_freedyn,naugraph_check,NULL,NULL}; #if !MAXN DYNALLSTAT(set,workset,workset_sz); DYNALLSTAT(permutation,workperm,workperm_sz); DYNALLSTAT(int,bucket,bucket_sz); #else static set workset[MAXM]; /* used for scratch work */ static permutation workperm[MAXN]; static int bucket[MAXN+2]; #endif /***************************************************************************** * * * isautom(g,perm,digraph,m,n) = TRUE iff perm is an automorphism of g * * (i.e., g^perm = g). Symmetry is assumed unless digraph = TRUE. * * * *****************************************************************************/ boolean isautom(graph *g, permutation *perm, boolean digraph, int m, int n) { boolean autom=TRUE; #ifdef _OPENMP #pragma omp parallel #endif { int stride=1, offs=0; register set *pg; register int pos; set *pgp; int posp,i; #ifdef _OPENMP offs=omp_get_thread_num(); stride=omp_get_num_threads(); #endif for (i = offs; autom && i < n; i+=stride) { pg=g+M*i; pgp = GRAPHROW(g,perm[i],M); pos = (digraph ? -1 : i); while ((pos = nextelement(pg,M,pos)) >= 0) { posp = perm[pos]; if (!ISELEMENT(pgp,posp)) autom=FALSE; } } } return autom; } /***************************************************************************** * * * testcanlab(g,canong,lab,samerows,m,n) compares g^lab to canong, * * using an ordering which is immaterial since it's only used here. The * * value returned is -1,0,1 if g^lab <,=,> canong. *samerows is set to * * the number of rows (0..n) of canong which are the same as those of g^lab. * * * * GLOBALS ACCESSED: workset<rw>,permset(),workperm<rw> * * * *****************************************************************************/ int testcanlab(graph *g, graph *canong, int *lab, int *samerows, int m, int n) { register int i,j; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"testcanlab"); DYNALLOC1(set,workset,workset_sz,m,"testcanlab"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = 0, ph = canong; i < n; ++i, ph += M) { permset(GRAPHROW(g,lab[i],M),workset,M,workperm); for (j = 0; j < M; ++j) if (workset[j] < ph[j]) { *samerows = i; return -1; } else if (workset[j] > ph[j]) { *samerows = i; return 1; } } *samerows = n; return 0; } /***************************************************************************** * * * updatecan(g,canong,lab,samerows,m,n) sets canong = g^lab, assuming * * the first samerows of canong are ok already. 
* * * * GLOBALS ACCESSED: permset(),workperm<rw> * * * *****************************************************************************/ void updatecan(graph *g, graph *canong, permutation *lab, int samerows, int m, int n) { register int i; register set *ph; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"updatecan"); #endif for (i = 0; i < n; ++i) workperm[lab[i]] = i; for (i = samerows, ph = GRAPHROW(canong,samerows,M); i < n; ++i, ph += M) permset(GRAPHROW(g,lab[i],M),ph,M,workperm); } /***************************************************************************** * * * refine(g,lab,ptn,level,numcells,count,active,code,m,n) performs a * * refinement operation on the partition at the specified level of the * * partition nest (lab,ptn). *numcells is assumed to contain the number of * * cells on input, and is updated. The initial set of active cells (alpha * * in the paper) is specified in the set active. Precisely, x is in active * * iff the cell starting at index x in lab is active. * * The resulting partition is equitable if active is correct (see the paper * * and the Guide). * * *code is set to a value which depends on the fine detail of the * * algorithm, but which is independent of the labelling of the graph. * * count is used for work space. * * * * GLOBALS ACCESSED: workset<w>,bit<r>,nextelement(),bucket<w>,workperm<w> * * * *****************************************************************************/ void refine(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { #if MAXM==1 refine1(g,lab,ptn,level,numcells,count,active,code,m,n); } #else register int i,c1,c2,labc1; register setword x; register set *set1,*set2; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT(active,split1)) || (split1 = nextelement(active,M,split1)) >= 0 || (split1 = nextelement(active,M,-1)) >= 0)) { DELELEMENT(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],M); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { EMPTYSET(workset,m); for (i = split1; i <= split2; ++i) ADDELEMENT(workset,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & (*set2++)) != 0) cnt += POPCOUNT(x); count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { set1 = workset; set2 = GRAPHROW(g,lab[i],m); cnt = 0; for (c1 = m; --c1 >= 0;) if ((x = (*set1++) & 
(*set2++)) != 0) cnt += POPCOUNT(x); while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // just to shut up gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT(active,cell1)) { ADDELEMENT(active,cell1); DELELEMENT(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } #endif /* else case of MAXM==1 */ /***************************************************************************** * * * refine1(g,lab,ptn,level,numcells,count,active,code,m,n) is the same as * * refine(g,lab,ptn,level,numcells,count,active,code,m,n), except that * * m==1 is assumed for greater efficiency. The results are identical in all * * respects. See refine (above) for the specs. * * * *****************************************************************************/ void refine1(graph *g, int *lab, int *ptn, int level, int *numcells, permutation *count, set *active, int *code, int m, int n) { register int i,c1,c2,labc1; register setword x; int split1,split2,cell1,cell2; int cnt,bmin,bmax; long longcode; set *gptr,workset0; int maxcell,maxpos,hint; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine1"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine1"); #endif longcode = *numcells; hint = 0; while (*numcells < n && ((split1 = hint, ISELEMENT1(active,split1)) || (split1 = nextelement(active,1,split1)) >= 0 || (split1 = nextelement(active,1,-1)) >= 0)) { DELELEMENT1(active,split1); for (split2 = split1; ptn[split2] > level; ++split2) {} longcode = MASH(longcode,split1+split2); if (split1 == split2) /* trivial splitting cell */ { gptr = GRAPHROW(g,lab[split1],1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; c1 = cell1; c2 = cell2; while (c1 <= c2) { labc1 = lab[c1]; if (ISELEMENT1(gptr,labc1)) ++c1; else { lab[c1] = lab[c2]; lab[c2] = labc1; --c2; } } if (c2 >= cell1 && c1 <= cell2) { ptn[c2] = level; longcode = MASH(longcode,c2); ++*numcells; if (ISELEMENT1(active,cell1) || c2-cell1 >= cell2-c1) { ADDELEMENT1(active,c1); if (c1 == cell2) hint = c1; } else { ADDELEMENT1(active,cell1); if (c2 == cell1) hint = cell1; } } } } else /* nontrivial splitting cell */ { workset0 = 0; for (i = split1; i <= split2; ++i) ADDELEMENT1(&workset0,lab[i]); longcode = MASH(longcode,split2-split1+1); for (cell1 = 0; cell1 < n; cell1 = cell2 + 1) { for (cell2 = cell1; ptn[cell2] > level; ++cell2) {} if (cell1 == cell2) continue; i = cell1; if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; count[i] = bmin = bmax = cnt; bucket[cnt] = 1; while (++i <= cell2) { if ((x = workset0 & g[lab[i]]) != 0) cnt = POPCOUNT(x); else cnt = 0; while (bmin > cnt) bucket[--bmin] = 0; while (bmax < cnt) bucket[++bmax] = 0; ++bucket[cnt]; count[i] = cnt; } if (bmin == bmax) { longcode = MASH(longcode,bmin+cell1); continue; } c1 = cell1; maxcell = -1; maxpos=0; // only needed to silence gcc warning for (i = bmin; i <= bmax; ++i) if (bucket[i]) { c2 = c1 + bucket[i]; 
bucket[i] = c1; longcode = MASH(longcode,i+c1); if (c2-c1 > maxcell) { maxcell = c2-c1; maxpos = c1; } if (c1 != cell1) { ADDELEMENT1(active,c1); if (c2-c1 == 1) hint = c1; ++*numcells; } if (c2 <= cell2) ptn[c2-1] = level; c1 = c2; } for (i = cell1; i <= cell2; ++i) workperm[bucket[count[i]]++] = lab[i]; for (i = cell1; i <= cell2; ++i) lab[i] = workperm[i]; if (!ISELEMENT1(active,cell1)) { ADDELEMENT1(active,cell1); DELELEMENT1(active,maxpos); } } } } longcode = MASH(longcode,*numcells); *code = CLEANUP(longcode); } /***************************************************************************** * * * cheapautom(ptn,level,digraph,n) returns TRUE if the partition at the * * specified level in the partition nest (lab,ptn) {lab is not needed here} * * satisfies a simple sufficient condition for its cells to be the orbits of * * some subgroup of the automorphism group. Otherwise it returns FALSE. * * It always returns FALSE if digraph!=FALSE. * * * * nauty assumes that this function will always return TRUE for any * * partition finer than one for which it returns TRUE. * * * *****************************************************************************/ boolean cheapautom(int *ptn, int level, boolean digraph, int n) { register int i,k,nnt; if (digraph) return FALSE; k = n; nnt = 0; for (i = 0; i < n; ++i) { --k; if (ptn[i] > level) { ++nnt; while (ptn[++i] > level) {} } } return (k <= nnt + 1 || k <= 4); } /***************************************************************************** * * * bestcell(g,lab,ptn,level,tc_level,m,n) returns the index in lab of the * * start of the "best non-singleton cell" for fixing. If there is no * * non-singleton cell it returns n. * * This implementation finds the first cell which is non-trivially joined * * to the greatest number of other cells. * * * * GLOBALS ACCESSED: bit<r>,workperm<rw>,workset<rw>,bucket<rw> * * * *****************************************************************************/ int bestcell(graph *g, int *lab, int *ptn, int level, int tc_level, int m, int n) { register int i; set *gp; register setword setword1,setword2; int v1,v2,nnt; #if !MAXN DYNALLOC1(permutation,workperm,workperm_sz,n,"refine"); DYNALLOC1(set,workset,workset_sz,m,"refine"); DYNALLOC1(int,bucket,bucket_sz,n+2,"refine"); #endif /* find non-singleton cells: put starts in workperm[0..nnt-1] */ i = nnt = 0; while (i < n) { if (ptn[i] > level) { workperm[nnt++] = i; while (ptn[i] > level) ++i; } ++i; } if (nnt == 0) return n; /* set bucket[i] to # non-trivial neighbours of n.s. cell i */ for (i = nnt; --i >= 0;) bucket[i] = 0; for (v2 = 1; v2 < nnt; ++v2) { EMPTYSET(workset,m); i = workperm[v2] - 1; do { ++i; ADDELEMENT(workset,lab[i]); } while (ptn[i] > level); for (v1 = 0; v1 < v2; ++v1) { gp = GRAPHROW(g,lab[workperm[v1]],m); #if MAXM==1 setword1 = *workset & *gp; setword2 = *workset & ~*gp; #else setword1 = setword2 = 0; for (i = m; --i >= 0;) { setword1 |= workset[i] & gp[i]; setword2 |= workset[i] & ~gp[i]; } #endif if (setword1 != 0 && setword2 != 0) { ++bucket[v1]; ++bucket[v2]; } } } /* find first greatest bucket value */ v1 = 0; v2 = bucket[0]; for (i = 1; i < nnt; ++i) if (bucket[i] > v2) { v1 = i; v2 = bucket[i]; } return (int)workperm[v1]; } /***************************************************************************** * * * naugraph_check() checks that this file is compiled compatibly with the * * given parameters. If not, call exit(1). 
*                                                                            *
*****************************************************************************/

void
naugraph_check(int wordsize, int m, int n, int version)
{
    if (wordsize != WORDSIZE)
    {
        fprintf(ERRFILE,"Error: WORDSIZE mismatch in naugraph.c\n");
        exit(1);
    }

#if MAXN
    if (m > MAXM)
    {
        fprintf(ERRFILE,"Error: MAXM inadequate in naugraph.c\n");
        exit(1);
    }

    if (n > MAXN)
    {
        fprintf(ERRFILE,"Error: MAXN inadequate in naugraph.c\n");
        exit(1);
    }
#endif

#ifdef BIGNAUTY
    if ((version & 1) == 0)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n");
        exit(1);
    }
#else
    if ((version & 1) == 1)
    {
        fprintf(ERRFILE,"Error: BIGNAUTY mismatch in naugraph.c\n");
        exit(1);
    }
#endif

    if (version < NAUTYREQUIRED)
    {
        fprintf(ERRFILE,"Error: naugraph.c version mismatch\n");
        exit(1);
    }
}

/*****************************************************************************
*                                                                            *
*  naugraph_freedyn() - free the dynamic memory in this module              *
*                                                                            *
*****************************************************************************/

void
naugraph_freedyn(void)
{
#if !MAXN
    DYNFREE(workset,workset_sz);
    DYNFREE(workperm,workperm_sz);
    DYNFREE(bucket,bucket_sz);
#endif
}
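The DYNALLOC1/DYNFREE pair used throughout this module implements nauty's grow-only scratch arrays: an allocation is (re)made only when the requested size exceeds the recorded size, and each module exposes a *_freedyn() hook to release everything at shutdown. A minimal sketch of that idiom, with hypothetical names and assuming plain malloc/free (nauty itself routes allocation through its ALLOCS/FREES macros):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of nauty's DYNALLOC1: reallocate only on growth. */
#define DYNALLOC1_SKETCH(type,name,name_sz,sz,msg) \
    do { \
        if ((size_t)(sz) > (name_sz)) \
        { \
            free(name); \
            if ((name = (type*)malloc((sz)*sizeof(type))) == NULL) \
            { \
                fprintf(stderr,"%s: allocation failed\n",(msg)); \
                exit(1); \
            } \
            name_sz = (sz); \
        } \
    } while (0)

static int *scratch = NULL;        /* analogue of workperm/bucket */
static size_t scratch_sz = 0;

static void
sketch_freedyn(void)               /* analogue of naugraph_freedyn() */
{
    free(scratch);
    scratch = NULL;
    scratch_sz = 0;
}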
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N RRR L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
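   (Illustrative note, not in the original source: with
   channel == (RedChannel|GreenChannel|BlueChannel), the first image in the
   list supplies the red channel, the second the green, and the third the
   blue.  The command-line equivalent is the -combine option, e.g.
   "convert gray_r.png gray_g.png gray_b.png -combine rgb.png".)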
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(combine_image,sRGBColorspace); if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. */ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; 
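/* Added descriptive note: in CMYK mode the black (K) component is stored in
   the image's index queue rather than in the PixelPacket itself, so this
   branch writes through the authentic index queue of combine_view instead of
   the pixel array used by the color channels above. */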
image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p))); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,sRGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport Image *SeparateImage(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *separate_image; MagickBooleanType status; /* Initialize separate image attributes. 
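   (Illustrative note, not in the original source: SeparateImage() is the
   non-destructive wrapper; it clones the input and runs the in-place
   SeparateImageChannel() on the clone, so SeparateImage(image, RedChannel,
   exception) yields a new grayscale image of the red component while
   leaving the original untouched.)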
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); status=SeparateImageChannel(separate_image,channel); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) image->matte=MagickTrue; /* Separate image channels. */ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
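/* Added illustrative aside, not part of the original source: the row loop
   above uses MagickCore's standard OpenMP pattern.  Rows are independent, so
   the for is parallelized; a worker that fails sets the shared status flag
   and later iterations fall through (OpenMP forbids breaking out of a
   parallel for); the shared progress counter is serialized with a named
   critical section.  A stripped-down sketch of the same pattern: */
#if 0
static int
process_rows(size_t rows)
{
  int status = 1;                  /* MagickTrue analogue */
  long progress = 0;
  ssize_t y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status)
#endif
  for (y = 0; y < (ssize_t) rows; y++)
  {
    if (status == 0)
      continue;                    /* skip remaining work after a failure */
    /* ... per-row work; on error: status = 0; continue; ... */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (process_rows_progress)
#endif
    progress++;
  }
  return(status);
}
#endif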
image_view=DestroyCacheView(image_view); if (channel != GrayChannels) image->matte=MagickFalse; (void) SetImageColorspace(image,GRAYColorspace); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SeparateImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); if ((channel & RedChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,RedChannel); AppendImageToList(&images,separate_image); } if ((channel & GreenChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,GreenChannel); AppendImageToList(&images,separate_image); } if ((channel & BlueChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlueChannel); AppendImageToList(&images,separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,BlackChannel); AppendImageToList(&images,separate_image); } if ((channel & AlphaChannel) != 0) { separate_image=CloneImage(image,0,0,MagickTrue,exception); (void) SeparateImageChannel(separate_image,TrueAlphaChannel); AppendImageToList(&images,separate_image); } return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelType alpha_type) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and TransparentAlphaChannel. 
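%    (Illustrative note, not in the original source: for example,
%    SetImageAlphaChannel(image,OpaqueAlphaChannel) forces every pixel fully
%    opaque, while ExtractAlphaChannel replaces the color channels with the
%    current alpha values and then turns the matte flag off, leaving a
%    grayscale mask.)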
% */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte=MagickTrue; break; } case BackgroundAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Flatten image pixels over the background pixels. 
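   (Worked derivation, added for illustration: ImageMagick 6 stores opacity
   o = 1 - alpha in QuantumScale-normalized terms, so the standard "over"
   compositing rule
       alpha_out = alpha_f + alpha_b*(1 - alpha_f) = 1 - o_f*o_b
   appears below as gamma = 1 - QuantumScale*q->opacity *
   QuantumScale*pixel.opacity, and each color channel is
       C_out = (alpha_f*C_f + alpha_b*C_b*(1 - alpha_f)) / alpha_out,
   with the division by alpha_out carried out through
   PerceptibleReciprocal(gamma) so that a fully transparent result does not
   divide by zero.)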
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
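Taken together, a minimal driver for the APIs defined above might look like the following. This is an illustrative sketch, not part of channel.c: it assumes the usual MagickCore entry points (MagickCoreGenesis, ReadImage, WriteImages) and hypothetical file names.

#include <stdio.h>
#include <magick/MagickCore.h>

int
main(int argc,char **argv)
{
  ExceptionInfo *exception;
  Image *image, *red;
  ImageInfo *info;

  (void) argc;
  MagickCoreGenesis(argv[0],MagickFalse);
  exception=AcquireExceptionInfo();
  info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(info->filename,"input.png",MaxTextExtent);
  image=ReadImage(info,exception);
  if (image != (Image *) NULL)
    {
      /* extract the red component as a grayscale image */
      red=SeparateImage(image,RedChannel,exception);
      if (red != (Image *) NULL)
        {
          (void) WriteImages(info,red,"red.png",exception);
          red=DestroyImage(red);
        }
      /* force the source fully opaque in place */
      (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      image=DestroyImage(image);
    }
  info=DestroyImageInfo(info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}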
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) (void)SetImageColorspace(combine_image, sRGBColorspace); if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; register const PixelPacket * restrict p; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, CombineImageTag, progress++, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if (status == MagickFalse) combine_image = 
DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) image->matte = MagickTrue; /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, SeparateImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % CopyAlphaChannel, * DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, * ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and * TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickSignature); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte = MagickTrue; break; } case BackgroundAlphaChannel: { CacheView * image_view; ExceptionInfo * exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* * Set transparent pixels to background color. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. */ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { CacheView * image_view; ExceptionInfo * exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* * Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
/* * Include declarations. */ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % C o m b i n e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % CombineImages() combines one or more images into a single image. * The % grayscale value of the pixels of each image in the sequence is * assigned in % order to the specified channels of the combined image. * The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, * etc. % % The format of the CombineImages method is: % % Image * *CombineImages(const Image *image,const ChannelType channel, % * ExceptionInfo *exception) % % A description of each parameter follows: % * % o image: the image. % % o exception: return any errors or warnings * in this structure. % */ MagickExport Image * CombineImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { #define CombineImageTag "Combine/Image" CacheView * combine_view; const Image * next; Image * combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* * Ensure the image are the same size. */ assert(image != (const Image *)NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next = image; next != (Image *) NULL; next = GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError, "ImagesAreNotTheSameSize"); } combine_image = CloneImage(image, 0, 0, MagickTrue, exception); if (combine_image == (Image *) NULL) return ((Image *) NULL); if (SetImageStorageClass(combine_image, DirectClass) == MagickFalse) { InheritException(exception, &combine_image->exception); combine_image = DestroyImage(combine_image); return ((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) (void)SetImageColorspace(combine_image, sRGBColorspace); if ((channel & OpacityChannel) != 0) combine_image->matte = MagickTrue; (void)SetImageBackgroundColor(combine_image); /* * Combine images. 
*/ status = MagickTrue; progress = 0; combine_view = AcquireAuthenticCacheView(combine_image, exception); for (y = 0; y < (ssize_t) combine_image->rows; y++) { CacheView * image_view; const Image * next; PixelPacket * pixels; register const PixelPacket * restrict p; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels = GetCacheViewAuthenticPixels(combine_view, 0, y, combine_image->columns, 1, exception); if (pixels == (PixelPacket *) NULL) { status = MagickFalse; continue; } next = image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; q = pixels; for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, p))); p++; q++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket * indexes; image_view = AcquireVirtualCacheView(next, exception); p = GetCacheViewVirtualPixels(image_view, 0, y, next->columns, 1, exception); if (p == (const PixelPacket *)NULL) continue; indexes = GetCacheViewAuthenticIndexQueue(combine_view); for (x = 0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes + x, ClampToQuantum(GetPixelIntensity(image, p))); p++; } image_view = DestroyCacheView(image_view); next = GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed = SetImageProgress(image, CombineImageTag, progress++, combine_image->rows); if (proceed == MagickFalse) status = MagickFalse; } } combine_view = DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void)TransformImageColorspace(combine_image, sRGBColorspace); if (status == MagickFalse) combine_image = 
DestroyImage(combine_image); return (combine_image); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % G e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha * channel is % not activated. That is, the image is RGB rather than RGBA * or CMYK rather % than CMYKA. % % The format of the GetImageAlphaChannel * method is: % % MagickBooleanType GetImageAlphaChannel(const Image * *image) % % A description of each parameter follows: % % o image: the * image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image * image) { assert(image != (const Image *)NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickSignature); return (image->matte); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImageChannel() separates a channel from the image and * returns it as % a grayscale image. A channel is a particular color * component of each pixel % in the image. % % The format of the * SeparateImageChannel method is: % % MagickBooleanType * SeparateImageChannel(Image *image, % const ChannelType channel) % % * A description of each parameter follows: % % o image: the image. % % * o channel: Identify which channel to extract: RedChannel, GreenChannel, % * BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % * YellowChannel, or BlackChannel. % */ MagickExport Image * SeparateImage(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * separate_image; MagickBooleanType status; /* * Initialize separate image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); separate_image = CloneImage(image, 0, 0, MagickTrue, exception); if (separate_image == (Image *) NULL) return ((Image *) NULL); status = SeparateImageChannel(separate_image, channel); if (status == MagickFalse) separate_image = DestroyImage(separate_image); return (separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image * image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView * image_view; ExceptionInfo * exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); if (SetImageStorageClass(image, DirectClass) == MagickFalse) return (MagickFalse); if (channel == GrayChannels) image->matte = MagickTrue; /* * Separate image channels. 
*/ status = MagickTrue; progress = 0; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } indexes = GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q, GetPixelRed(q)); SetPixelBlue(q, GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelGreen(q)); SetPixelBlue(q, GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelBlue(q)); SetPixelGreen(q, GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelOpacity(q)); SetPixelGreen(q, GetPixelOpacity(q)); SetPixelBlue(q, GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelIndex(indexes + x)); SetPixelGreen(q, GetPixelIndex(indexes + x)); SetPixelBlue(q, GetPixelIndex(indexes + x)); q++; } break; } case TrueAlphaChannel: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelRed(q, GetPixelAlpha(q)); SetPixelGreen(q, GetPixelAlpha(q)); SetPixelBlue(q, GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x = 0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q, ClampToQuantum(GetPixelIntensity(image, q))); q++; } break; } default: break; } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed = SetImageProgress(image, SeparateImageTag, progress++, image->rows); if (proceed == MagickFalse) status = MagickFalse; } } image_view = DestroyCacheView(image_view); if (channel != GrayChannels) image->matte = MagickFalse; (void)SetImageColorspace(image, GRAYColorspace); return (status); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e p a r a t e I m a g e s * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SeparateImages() returns a separate grayscale image for each * channel % specified. % % The format of the SeparateImages method is: % % * MagickBooleanType SeparateImages(const Image *image, % const * ChannelType channel,ExceptionInfo *exception) % % A description of each * parameter follows: % % o image: the image. % % o channel: Identify * which channels to extract: RedChannel, GreenChannel, % BlueChannel, * OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or * BlackChannel. % % o exception: return any errors or warnings in this * structure. 
% */ MagickExport Image * SeparateImages(const Image * image, const ChannelType channel, ExceptionInfo * exception) { Image * images, *separate_image; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image->filename); images = NewImageList(); if ((channel & RedChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, RedChannel); AppendImageToList(&images, separate_image); } if ((channel & GreenChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, GreenChannel); AppendImageToList(&images, separate_image); } if ((channel & BlueChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlueChannel); AppendImageToList(&images, separate_image); } if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace)) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, BlackChannel); AppendImageToList(&images, separate_image); } if ((channel & AlphaChannel) != 0) { separate_image = CloneImage(image, 0, 0, MagickTrue, exception); (void)SeparateImageChannel(separate_image, TrueAlphaChannel); AppendImageToList(&images, separate_image); } return (images); } /* * * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%% % * % % * % % * % % S e t I m a g e A l p h a C h a n n e l * % % * % % * % % * % * %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% * %%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets * the alpha % channel. % % The format of the SetImageAlphaChannel method * is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % * const AlphaChannelType alpha_type) % % A description of each parameter * follows: % % o image: the image. % % o alpha_type: The alpha * channel type: ActivateAlphaChannel, % CopyAlphaChannel, * DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, * ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and * TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image * image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void)LogMagickEvent(TraceEvent, GetMagickModule(), "..."); assert(image->signature == MagickSignature); status = MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte = MagickTrue; break; } case BackgroundAlphaChannel: { CacheView * image_view; ExceptionInfo * exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* * Set transparent pixels to background color. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q, pixel.red); SetPixelGreen(q, pixel.green); SetPixelBlue(q, pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* * Special usage case for SeparateImageChannel(): copy grayscale * color to the alpha channel. */ status = SeparateImageChannel(image, GrayChannels); image->matte = MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* * Reset all color channels to background color. */ GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &(image->background_color), (IndexPacket *) NULL, &background); (void)LevelColorsImage(image, &background, &background, MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte = MagickFalse; break; } case ExtractAlphaChannel: { status = SeparateImageChannel(image, TrueAlphaChannel); image->matte = MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { CacheView * image_view; ExceptionInfo * exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* * Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image, DirectClass) == MagickFalse) break; GetMagickPixelPacket(image, &background); SetMagickPixelPacket(image, &image->background_color, (const IndexPacket *) NULL, &background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index = 0; SetPixelPacket(image, &background, &pixel, &index); status = MagickTrue; exception = (&image->exception); image_view = AcquireAuthenticCacheView(image, exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y = 0; y < (ssize_t) image->rows; y++) { register IndexPacket * restrict indexes; register PixelPacket * restrict q; register ssize_t x; if (status == MagickFalse) continue; q = GetCacheViewAuthenticPixels(image_view, 0, y, image->columns, 1, exception); if (q == (PixelPacket *) NULL) { status = MagickFalse; continue; } for (x = 0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma = 1.0 - QuantumScale * QuantumScale * q->opacity * pixel.opacity; opacity = (double)QuantumRange *(1.0 - gamma); gamma = PerceptibleReciprocal(gamma); q->red = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity, (MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity, (MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue = ClampToQuantum(gamma * MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity, (MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity = ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes = GetCacheViewAuthenticIndexQueue(image_view); for (x = 0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes + x, index); } if (SyncCacheViewAuthenticPixels(image_view, exception) == MagickFalse) status = MagickFalse; } image_view = DestroyCacheView(image_view); return (status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status = SetImageOpacity(image, OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status = SetImageOpacity(image, OpaqueOpacity); break; } case TransparentAlphaChannel: { status = SetImageOpacity(image, TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return (status); return (SyncImagePixelCache(image, &image->exception)); }
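The row loops above (channel separation and both alpha-channel passes) share one OpenMP shape: a parallel for over image rows in which early exit is impossible (OpenMP forbids break inside a worksharing loop), so each iteration first checks a shared status flag and skips with continue, a failure flips the flag, and the progress counter is serialized inside a named critical section. A minimal compilable sketch of just that pattern, with stand-in process_row and report_progress helpers that are placeholders, not ImageMagick APIs:

#include <stdbool.h>
#include <stdio.h>

static bool process_row(long y) { return y >= 0; }      /* stand-in per-row kernel   */
static void report_progress(long done) { (void)done; }  /* stand-in progress monitor */

static bool apply_rows(long rows)
{
    bool status = true;
    long progress = 0;
#if defined(_OPENMP)
#pragma omp parallel for schedule(static,4) shared(status,progress)
#endif
    for (long y = 0; y < rows; y++) {
        if (status == false)
            continue;            /* cannot break out of a worksharing loop */
        if (process_row(y) == false)
            status = false;      /* the flag only ever flips true -> false */
#if defined(_OPENMP)
#pragma omp critical (progress_monitor)
#endif
        report_progress(progress++);
    }
    return status;
}

int main(void) { printf("%d\n", apply_rows(16) ? 1 : 0); return 0; }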
pr70550-1.c
/* PR middle-end/70550 */ /* { dg-do compile } */ /* { dg-additional-options "-Wuninitialized" } */ #ifdef __SIZEOF_INT128__ typedef __int128 T; #else typedef long long T; #endif void bar (T); #pragma omp declare target (bar) void foo (void) { { int i; #pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized" } */ { i = 26; bar (i); } } { T j; #pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized" } */ { j = 37; bar (j); } } { int i; #pragma omp target /* { dg-bogus "is used uninitialized" } */ { i = 26; bar (i); } } { T j; #pragma omp target /* { dg-bogus "is used uninitialized" } */ { j = 37; bar (j); } } { int i; #pragma omp target firstprivate (i) /* { dg-warning "is used uninitialized" } */ { i = 26; bar (i); } } { T j; #pragma omp target firstprivate (j) /* { dg-warning "is used uninitialized" } */ { j = 37; bar (j); } } { int i; #pragma omp target private (i) /* { dg-bogus "is used uninitialized" } */ { i = 26; bar (i); } } { T j; #pragma omp target private (j) /* { dg-bogus "is used uninitialized" } */ { j = 37; bar (j); } } }
/* PR middle-end/70550 */ /* { dg-do compile } */ /* { dg-additional-options "-Wuninitialized" } */ #ifdef __SIZEOF_INT128__ typedef __int128 T; #else typedef long long T; #endif void bar (T); void foo (void) { { int i; i = 26; bar (i); } { T j; j = 37; bar (j); } { int i; i = 26; bar (i); } { T j; j = 37; bar (j); } { int i; i = 26; bar (i); } { T j; j = 37; bar (j); } { int i; i = 26; bar (i); } { T j; j = 37; bar (j); } }
/* PR middle-end/70550 */ /* { dg-do compile } */ /* { dg-additional-options "-Wuninitialized" } */ #ifdef __SIZEOF_INT128__ typedef __int128 T; #else typedef long long T; #endif void bar(T); #pragma omp declare target (bar) void foo(void) { { int i; #pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized" } */ { i = 26; bar(i); } } { T j; #pragma omp target defaultmap(tofrom:scalar) /* { dg-bogus "is used uninitialized" } */ { j = 37; bar(j); } } { int i; #pragma omp target /* { dg-bogus "is used uninitialized" } */ { i = 26; bar(i); } } { T j; #pragma omp target /* { dg-bogus "is used uninitialized" } */ { j = 37; bar(j); } } { int i; #pragma omp target firstprivate (i) /* { dg-warning "is used uninitialized" } */ { i = 26; bar(i); } } { T j; #pragma omp target firstprivate (j) /* { dg-warning "is used uninitialized" } */ { j = 37; bar(j); } } { int i; #pragma omp target private (i) /* { dg-bogus "is used uninitialized" } */ { i = 26; bar(i); } } { T j; #pragma omp target private (j) /* { dg-bogus "is used uninitialized" } */ { j = 37; bar(j); } } }
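The variants of this testcase differ only in the data-sharing clause, and that is exactly what the dg markers track: firstprivate semantically reads the host value to initialize the device copy, so warning about an uninitialized variable is correct (dg-warning); private creates a fresh copy with no initial read, and a tofrom mapping (implicit or via defaultmap) is a byte copy rather than a C-level use, so warnings there are bogus (the subject of PR70550). A small self-contained illustration of the same contrast, outside the testsuite and with made-up variable names:

#include <stdio.h>

int main(void)
{
    int a;  /* deliberately uninitialized */
    int b;
    int c;

#pragma omp target map(tofrom: a)  /* bytes are copied, but no C-level read: no warning expected */
    { a = 1; }

#pragma omp target private(b)      /* fresh uninitialized copy, assigned before use: no warning expected */
    { b = 2; }

#pragma omp target firstprivate(c) /* device copy initialized from the uninitialized c: a warning here is correct */
    { c = 3; }

    printf("%d\n", a);             /* only a is mapped back to the host */
    return 0;
}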
DRB023-sections1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Two tasks without synchronization to protect data write, causing data races. Data race pair: i@58:5 vs. i@60:5 */ #include <stdio.h> int main() { int i=0; #pragma omp parallel sections { #pragma omp section i = 1; #pragma omp section i = 2; } printf("i=%d\n",i); return 0; }
/* * Two tasks without synchronization to protect data write, causing data * races. Data race pair: i@58:5 vs. i@60:5 */ #include <stdio.h> int main() { int i = 0; i = 1; i = 2; printf("i=%d\n", i); return 0; }
/* * Two tasks without synchronization to protect data write, causing data * races. Data race pair: i@58:5 vs. i@60:5 */ #include <stdio.h> int main() { int i = 0; #pragma omp parallel sections { #pragma omp section i = 1; #pragma omp section i = 2; } printf("i=%d\n", i); return 0; }
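DRB023 is labelled -yes because the two section blocks write the same shared i with no ordering between them; which value survives depends on which section happens to run last, and that write-write conflict is the documented race pair. One possible race-free rewrite (not part of the benchmark itself) gives each section its own variable, so the parallel sections carry no conflicting accesses:

#include <stdio.h>

int main(void)
{
    int i1 = 0, i2 = 0;
#pragma omp parallel sections
    {
#pragma omp section
        i1 = 1;  /* each section now writes a distinct variable */
#pragma omp section
        i2 = 2;
    }
    printf("i1=%d i2=%d\n", i1, i2);
    return 0;
}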
SpatialConvolutionMM.c
#include <string.h> #ifdef USEQSML #include <stdbool.h> typedef struct qsml_info qsml_info; #include <qsml.h> #endif #include "../thnets.h" #ifndef USEQSML static void nn_unfolded_copy(THFloatTensor *finput, THFloatTensor *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; float *input_data = THFloatTensor_data(input); float *finput_data = THFloatTensor_data(finput); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane*kH*kW; k++) { long nip = k / (kH*kW); long rest = k % (kH*kW); long kh = rest / kW; long kw = rest % kW; long x,y; long long ix,iy; float *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); float *src = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { long lpad,rpad; for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { memset(dst+y*outputWidth, 0, sizeof(float)*outputWidth); } else { if (dW==1){ ix = (long long)(0 - padW + kw); lpad = thfmaxf(0,padW-kw); rpad = thfmaxf(0,padW-(kW-kw-1)); if (outputWidth-rpad-lpad <= 0) { memset(dst+(y*outputWidth), 0, sizeof(float)*outputWidth); } else { if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(float)*lpad); memcpy(dst+(y*outputWidth+lpad), src+(iy*inputWidth+ix+lpad), sizeof(float)*(outputWidth-rpad-lpad)); if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(float)*rpad); } } else{ for (x=0; x<outputWidth; x++){ ix = (long long)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) memset(dst+(y*outputWidth+x), 0, sizeof(float)*1); else memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix), sizeof(float)*(1)); } } } } } else { for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH + kh); ix = (long long)(0 + kw); if (dW == 1) memcpy(dst+(y*outputWidth), src+(iy*inputWidth+ix), sizeof(float)*outputWidth); else{ for (x=0; x<outputWidth; x++) memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix+x*dW), sizeof(float)*(1)); } } } } } static void nn_SpatialConvolutionMM_updateOutput_frame(THFloatTensor *input, THFloatTensor *output, THFloatTensor *weight, THFloatTensor *bias, THFloatTensor *finput, int kW, int kH, int dW, int dH, int padW, int padH, long nInputPlane, long inputWidth, long inputHeight, long nOutputPlane, long outputWidth, long outputHeight) { THFloatTensor *output2d = 0; if(finput) { nn_unfolded_copy(finput, input, kW, kH, dW, dH, padW, padH, (int)nInputPlane, (int)inputWidth, (int)inputHeight, (int)outputWidth, (int)outputHeight); output2d = THFloatTensor_newWithStorage2d(output->storage, output->storageOffset, nOutputPlane, -1, outputHeight*outputWidth, -1); } long i; for (i = 0; i < nOutputPlane; i++) { float *data = output->storage->data + output->storageOffset + output->stride[0]*i; float what = bias && bias->storage ? 
THFloatTensor_data(bias)[i] : 0; long len = outputHeight*outputWidth; THFloatVector_fill(data, what, len); } if(finput) { THFloatTensor_addmm(output2d, 1, output2d, 1, weight, finput); THFloatTensor_free(output2d); } #ifndef USEBLAS else THFloatTensor_convmm(output, 1, 1, weight, input, kH, kW, dH, dW, padH, padW); #endif } #endif #ifdef USEQSML void applybias(struct module *newmod, int outP, int outW, int outH) { int i, wsize; wsize = outW*outH; float* biasdata = THFloatTensor_data(newmod->SpatialConvolution.bias); float* outdata = THFloatTensor_data(newmod->output); //float* biasdata = (float*) calloc(wsize*outP, sizeof(float));//one memcpy is better? //memcpy(outdata, biasdata, wsize*outP*sizeof(float));//only saves 1ms for(i=0; i<wsize; i++) memcpy(outdata+i*outP, biasdata, outP*sizeof(float)); } #endif THFloatTensor *nn_SpatialConvolutionMM_updateOutput(struct module *module, THFloatTensor *input) { int kW = module->SpatialConvolution.kW; int kH = module->SpatialConvolution.kH; int dW = module->SpatialConvolution.dW; int dH = module->SpatialConvolution.dH; int padW = module->SpatialConvolution.padW; int padH = module->SpatialConvolution.padH; THFloatTensor *finput = module->SpatialConvolution.finput;//thnets [col,row,plane,batch] #ifndef USEQSML int batch = 1; THFloatTensor *bias = module->SpatialConvolution.bias; #endif THFloatTensor *output = module->output;//[3col,2row,1plane,0batch] THFloatTensor *weight = THFloatTensor_new(); THFloatTensor_set(weight, module->SpatialConvolution.weight); THFloatTensor_resize2d(weight, weight->size[0], THFloatTensor_nElement(weight) / weight->size[0]); if (input->nDimension == 3) { #ifndef USEQSML batch = 0; #endif THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]); } long batchSize = input->size[0]; long nInputPlane = module->SpatialConvolution.nInputPlane; long nOutputPlane = module->SpatialConvolution.nOutputPlane; long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if(nInputPlane != input->size[1]) THError("nInputPlane %ld does not match input planes %ld", nInputPlane, input->size[1]); if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). 
Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); if(module->type == MT_SpatialConvolutionMM) THFloatTensor_resize3d(finput, batchSize, kW*kH*nInputPlane, outputHeight*outputWidth); THFloatTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth); #ifdef USEQSML qsml_int channel = nInputPlane; qsml_int inH = inputHeight; qsml_int inW = inputWidth; qsml_int qkW = kW; qsml_int qkH = kH; qsml_int qdW = dW; qsml_int qdH = dH; qsml_int qpadW = padW; qsml_int qpadH = padH; qsml_int outP = nOutputPlane; qsml_int outH = outputHeight; qsml_int outW = outputWidth; applybias(module,(int)outP,(int)outW,(int)outH);//doesn't add much overhead on average float *image = THFloatTensor_data(input);//[plane, col, row] float *filtro = THFloatTensor_data(weight);//[outplane, plane, col, row] float *outf = THFloatTensor_data(output);//[plane, col, row] sconv_mm(true, image, inW, inH, channel, filtro, outP, qkW, qkH, qpadW, qpadH, qdW, qdH, outf, outW, outH); #else long t; #pragma omp parallel for if(batchSize >= 4) private(t) for (t = 0; t < batchSize; t++) { THFloatTensor *input_t = THFloatTensor_newSelect(input, 0, t); THFloatTensor *output_t = THFloatTensor_newSelect(output, 0, t); THFloatTensor *finput_t = module->type == MT_SpatialConvolutionMM ? THFloatTensor_newSelect(finput, 0, t) : 0; nn_SpatialConvolutionMM_updateOutput_frame(input_t, output_t, weight, bias, finput_t, kW, kH, dW, dH, padW, padH, nInputPlane, inputWidth, inputHeight, nOutputPlane, outputWidth, outputHeight); THFloatTensor_free(input_t); THFloatTensor_free(output_t); THFloatTensor_free(finput_t); } if (batch == 0) { THFloatTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth); THFloatTensor_resize3d(input, nInputPlane, inputHeight, inputWidth); } THFloatTensor_free(weight); #endif return output; }
#include <string.h> #ifdef USEQSML #include <stdbool.h> typedef struct qsml_info qsml_info; #include <qsml.h> #endif #include "../thnets.h" #ifndef USEQSML static void nn_unfolded_copy(THFloatTensor *finput, THFloatTensor *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; float *input_data = THFloatTensor_data(input); float *finput_data = THFloatTensor_data(finput); for(k = 0; k < nInputPlane*kH*kW; k++) { long nip = k / (kH*kW); long rest = k % (kH*kW); long kh = rest / kW; long kw = rest % kW; long x,y; long long ix,iy; float *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); float *src = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { long lpad,rpad; for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { memset(dst+y*outputWidth, 0, sizeof(float)*outputWidth); } else { if (dW==1){ ix = (long long)(0 - padW + kw); lpad = thfmaxf(0,padW-kw); rpad = thfmaxf(0,padW-(kW-kw-1)); if (outputWidth-rpad-lpad <= 0) { memset(dst+(y*outputWidth), 0, sizeof(float)*outputWidth); } else { if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(float)*lpad); memcpy(dst+(y*outputWidth+lpad), src+(iy*inputWidth+ix+lpad), sizeof(float)*(outputWidth-rpad-lpad)); if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(float)*rpad); } } else{ for (x=0; x<outputWidth; x++){ ix = (long long)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) memset(dst+(y*outputWidth+x), 0, sizeof(float)*1); else memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix), sizeof(float)*(1)); } } } } } else { for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH + kh); ix = (long long)(0 + kw); if (dW == 1) memcpy(dst+(y*outputWidth), src+(iy*inputWidth+ix), sizeof(float)*outputWidth); else{ for (x=0; x<outputWidth; x++) memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix+x*dW), sizeof(float)*(1)); } } } } } static void nn_SpatialConvolutionMM_updateOutput_frame(THFloatTensor *input, THFloatTensor *output, THFloatTensor *weight, THFloatTensor *bias, THFloatTensor *finput, int kW, int kH, int dW, int dH, int padW, int padH, long nInputPlane, long inputWidth, long inputHeight, long nOutputPlane, long outputWidth, long outputHeight) { THFloatTensor *output2d = 0; if(finput) { nn_unfolded_copy(finput, input, kW, kH, dW, dH, padW, padH, (int)nInputPlane, (int)inputWidth, (int)inputHeight, (int)outputWidth, (int)outputHeight); output2d = THFloatTensor_newWithStorage2d(output->storage, output->storageOffset, nOutputPlane, -1, outputHeight*outputWidth, -1); } long i; for (i = 0; i < nOutputPlane; i++) { float *data = output->storage->data + output->storageOffset + output->stride[0]*i; float what = bias && bias->storage ? THFloatTensor_data(bias)[i] : 0; long len = outputHeight*outputWidth; THFloatVector_fill(data, what, len); } if(finput) { THFloatTensor_addmm(output2d, 1, output2d, 1, weight, finput); THFloatTensor_free(output2d); } #ifndef USEBLAS else THFloatTensor_convmm(output, 1, 1, weight, input, kH, kW, dH, dW, padH, padW); #endif } #endif #ifdef USEQSML void applybias(struct module *newmod, int outP, int outW, int outH) { int i, wsize; wsize = outW*outH; float* biasdata = THFloatTensor_data(newmod->SpatialConvolution.bias); float* outdata = THFloatTensor_data(newmod->output); //float* biasdata = (float*) calloc(wsize*outP, sizeof(float));//one memcpy is better? 
//memcpy(outdata, biasdata, wsize*outP*sizeof(float));//only saves 1ms for(i=0; i<wsize; i++) memcpy(outdata+i*outP, biasdata, outP*sizeof(float)); } #endif THFloatTensor *nn_SpatialConvolutionMM_updateOutput(struct module *module, THFloatTensor *input) { int kW = module->SpatialConvolution.kW; int kH = module->SpatialConvolution.kH; int dW = module->SpatialConvolution.dW; int dH = module->SpatialConvolution.dH; int padW = module->SpatialConvolution.padW; int padH = module->SpatialConvolution.padH; THFloatTensor *finput = module->SpatialConvolution.finput;//thnets [col,row,plane,batch] #ifndef USEQSML int batch = 1; THFloatTensor *bias = module->SpatialConvolution.bias; #endif THFloatTensor *output = module->output;//[3col,2row,1plane,0batch] THFloatTensor *weight = THFloatTensor_new(); THFloatTensor_set(weight, module->SpatialConvolution.weight); THFloatTensor_resize2d(weight, weight->size[0], THFloatTensor_nElement(weight) / weight->size[0]); if (input->nDimension == 3) { #ifndef USEQSML batch = 0; #endif THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]); } long batchSize = input->size[0]; long nInputPlane = module->SpatialConvolution.nInputPlane; long nOutputPlane = module->SpatialConvolution.nOutputPlane; long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if(nInputPlane != input->size[1]) THError("nInputPlane %ld does not match input planes %ld", nInputPlane, input->size[1]); if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); if(module->type == MT_SpatialConvolutionMM) THFloatTensor_resize3d(finput, batchSize, kW*kH*nInputPlane, outputHeight*outputWidth); THFloatTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth); #ifdef USEQSML qsml_int channel = nInputPlane; qsml_int inH = inputHeight; qsml_int inW = inputWidth; qsml_int qkW = kW; qsml_int qkH = kH; qsml_int qdW = dW; qsml_int qdH = dH; qsml_int qpadW = padW; qsml_int qpadH = padH; qsml_int outP = nOutputPlane; qsml_int outH = outputHeight; qsml_int outW = outputWidth; applybias(module,(int)outP,(int)outW,(int)outH);//doesn't add much overhead on average float *image = THFloatTensor_data(input);//[plane, col, row] float *filtro = THFloatTensor_data(weight);//[outplane, plane, col, row] float *outf = THFloatTensor_data(output);//[plane, col, row] sconv_mm(true, image, inW, inH, channel, filtro, outP, qkW, qkH, qpadW, qpadH, qdW, qdH, outf, outW, outH); #else long t; for (t = 0; t < batchSize; t++) { THFloatTensor *input_t = THFloatTensor_newSelect(input, 0, t); THFloatTensor *output_t = THFloatTensor_newSelect(output, 0, t); THFloatTensor *finput_t = module->type == MT_SpatialConvolutionMM ? THFloatTensor_newSelect(finput, 0, t) : 0; nn_SpatialConvolutionMM_updateOutput_frame(input_t, output_t, weight, bias, finput_t, kW, kH, dW, dH, padW, padH, nInputPlane, inputWidth, inputHeight, nOutputPlane, outputWidth, outputHeight); THFloatTensor_free(input_t); THFloatTensor_free(output_t); THFloatTensor_free(finput_t); } if (batch == 0) { THFloatTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth); THFloatTensor_resize3d(input, nInputPlane, inputHeight, inputWidth); } THFloatTensor_free(weight); #endif return output; }
#include <string.h> #ifdef USEQSML #include <stdbool.h> typedef struct qsml_info qsml_info; #include <qsml.h> #endif #include "../thnets.h" #ifndef USEQSML static void nn_unfolded_copy(THFloatTensor *finput, THFloatTensor *input, int kW, int kH, int dW, int dH, int padW, int padH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; float *input_data = THFloatTensor_data(input); float *finput_data = THFloatTensor_data(finput); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane*kH*kW; k++) { long nip = k / (kH*kW); long rest = k % (kH*kW); long kh = rest / kW; long kw = rest % kW; long x,y; long long ix,iy; float *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); float *src = input_data + nip*(inputHeight*inputWidth); if (padW > 0 || padH > 0) { long lpad,rpad; for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH - padH + kh); if (iy < 0 || iy >= inputHeight) { memset(dst+y*outputWidth, 0, sizeof(float)*outputWidth); } else { if (dW==1){ ix = (long long)(0 - padW + kw); lpad = thfmaxf(0,padW-kw); rpad = thfmaxf(0,padW-(kW-kw-1)); if (outputWidth-rpad-lpad <= 0) { memset(dst+(y*outputWidth), 0, sizeof(float)*outputWidth); } else { if (lpad > 0) memset(dst+y*outputWidth, 0, sizeof(float)*lpad); memcpy(dst+(y*outputWidth+lpad), src+(iy*inputWidth+ix+lpad), sizeof(float)*(outputWidth-rpad-lpad)); if (rpad > 0) memset(dst+y*outputWidth + outputWidth - rpad, 0, sizeof(float)*rpad); } } else{ for (x=0; x<outputWidth; x++){ ix = (long long)(x*dW - padW + kw); if (ix < 0 || ix >= inputWidth) memset(dst+(y*outputWidth+x), 0, sizeof(float)*1); else memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix), sizeof(float)*(1)); } } } } } else { for(y = 0; y < outputHeight; y++) { iy = (long long)(y*dH + kh); ix = (long long)(0 + kw); if (dW == 1) memcpy(dst+(y*outputWidth), src+(iy*inputWidth+ix), sizeof(float)*outputWidth); else{ for (x=0; x<outputWidth; x++) memcpy(dst+(y*outputWidth+x), src+(iy*inputWidth+ix+x*dW), sizeof(float)*(1)); } } } } } static void nn_SpatialConvolutionMM_updateOutput_frame(THFloatTensor *input, THFloatTensor *output, THFloatTensor *weight, THFloatTensor *bias, THFloatTensor *finput, int kW, int kH, int dW, int dH, int padW, int padH, long nInputPlane, long inputWidth, long inputHeight, long nOutputPlane, long outputWidth, long outputHeight) { THFloatTensor *output2d = 0; if(finput) { nn_unfolded_copy(finput, input, kW, kH, dW, dH, padW, padH, (int)nInputPlane, (int)inputWidth, (int)inputHeight, (int)outputWidth, (int)outputHeight); output2d = THFloatTensor_newWithStorage2d(output->storage, output->storageOffset, nOutputPlane, -1, outputHeight*outputWidth, -1); } long i; for (i = 0; i < nOutputPlane; i++) { float *data = output->storage->data + output->storageOffset + output->stride[0]*i; float what = bias && bias->storage ? 
THFloatTensor_data(bias)[i] : 0; long len = outputHeight*outputWidth; THFloatVector_fill(data, what, len); } if(finput) { THFloatTensor_addmm(output2d, 1, output2d, 1, weight, finput); THFloatTensor_free(output2d); } #ifndef USEBLAS else THFloatTensor_convmm(output, 1, 1, weight, input, kH, kW, dH, dW, padH, padW); #endif } #endif #ifdef USEQSML void applybias(struct module *newmod, int outP, int outW, int outH) { int i, wsize; wsize = outW*outH; float* biasdata = THFloatTensor_data(newmod->SpatialConvolution.bias); float* outdata = THFloatTensor_data(newmod->output); //float* biasdata = (float*) calloc(wsize*outP, sizeof(float));//one memcpy is better? //memcpy(outdata, biasdata, wsize*outP*sizeof(float));//only saves 1ms for(i=0; i<wsize; i++) memcpy(outdata+i*outP, biasdata, outP*sizeof(float)); } #endif THFloatTensor *nn_SpatialConvolutionMM_updateOutput(struct module *module, THFloatTensor *input) { int kW = module->SpatialConvolution.kW; int kH = module->SpatialConvolution.kH; int dW = module->SpatialConvolution.dW; int dH = module->SpatialConvolution.dH; int padW = module->SpatialConvolution.padW; int padH = module->SpatialConvolution.padH; THFloatTensor *finput = module->SpatialConvolution.finput;//thnets [col,row,plane,batch] #ifndef USEQSML int batch = 1; THFloatTensor *bias = module->SpatialConvolution.bias; #endif THFloatTensor *output = module->output;//[3col,2row,1plane,0batch] THFloatTensor *weight = THFloatTensor_new(); THFloatTensor_set(weight, module->SpatialConvolution.weight); THFloatTensor_resize2d(weight, weight->size[0], THFloatTensor_nElement(weight) / weight->size[0]); if (input->nDimension == 3) { #ifndef USEQSML batch = 0; #endif THFloatTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]); } long batchSize = input->size[0]; long nInputPlane = module->SpatialConvolution.nInputPlane; long nOutputPlane = module->SpatialConvolution.nOutputPlane; long inputWidth = input->size[3]; long inputHeight = input->size[2]; long outputWidth = (inputWidth + 2*padW - kW) / dW + 1; long outputHeight = (inputHeight + 2*padH - kH) / dH + 1; if(nInputPlane != input->size[1]) THError("nInputPlane %ld does not match input planes %ld", nInputPlane, input->size[1]); if (outputWidth < 1 || outputHeight < 1) THError("Given input size: (%dx%dx%d). Calculated output size: (%dx%dx%d). 
Output size is too small", nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth); if(module->type == MT_SpatialConvolutionMM) THFloatTensor_resize3d(finput, batchSize, kW*kH*nInputPlane, outputHeight*outputWidth); THFloatTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth); #ifdef USEQSML qsml_int channel = nInputPlane; qsml_int inH = inputHeight; qsml_int inW = inputWidth; qsml_int qkW = kW; qsml_int qkH = kH; qsml_int qdW = dW; qsml_int qdH = dH; qsml_int qpadW = padW; qsml_int qpadH = padH; qsml_int outP = nOutputPlane; qsml_int outH = outputHeight; qsml_int outW = outputWidth; applybias(module,(int)outP,(int)outW,(int)outH);//doesn't add much overhead on average float *image = THFloatTensor_data(input);//[plane, col, row] float *filtro = THFloatTensor_data(weight);//[outplane, plane, col, row] float *outf = THFloatTensor_data(output);//[plane, col, row] sconv_mm(true, image, inW, inH, channel, filtro, outP, qkW, qkH, qpadW, qpadH, qdW, qdH, outf, outW, outH); #else long t; #pragma omp parallel for if(batchSize >= 4) private(t) for (t = 0; t < batchSize; t++) { THFloatTensor *input_t = THFloatTensor_newSelect(input, 0, t); THFloatTensor *output_t = THFloatTensor_newSelect(output, 0, t); THFloatTensor *finput_t = module->type == MT_SpatialConvolutionMM ? THFloatTensor_newSelect(finput, 0, t) : 0; nn_SpatialConvolutionMM_updateOutput_frame(input_t, output_t, weight, bias, finput_t, kW, kH, dW, dH, padW, padH, nInputPlane, inputWidth, inputHeight, nOutputPlane, outputWidth, outputHeight); THFloatTensor_free(input_t); THFloatTensor_free(output_t); THFloatTensor_free(finput_t); } if (batch == 0) { THFloatTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth); THFloatTensor_resize3d(input, nInputPlane, inputHeight, inputWidth); } THFloatTensor_free(weight); #endif return output; }
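The batch loop in nn_SpatialConvolutionMM_updateOutput is worth isolating: the if(batchSize >= 4) clause makes the parallelism conditional, so the runtime only forks a thread team when there are at least four samples, and smaller batches run the loop serially on the encountering thread with no fork/join overhead. A self-contained sketch of that clause, with a stand-in process_item in place of the per-sample convolution:

#include <stdio.h>

static void process_item(long t)  /* stand-in for the per-sample convolution */
{
    printf("item %ld\n", t);
}

static void run_batch(long batch_size)
{
    long t;
#pragma omp parallel for if(batch_size >= 4) private(t)
    for (t = 0; t < batch_size; t++)
        process_item(t);
}

int main(void)
{
    run_batch(2);  /* below the threshold: executes serially */
    run_batch(8);  /* parallel when compiled with OpenMP enabled */
    return 0;
}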
marlonbrot.c
#include <complex> #include <iostream> #include <omp.h> #include <mpi.h> #include <cstdlib> using namespace std; int main(int argc, char** argv){ MPI_Init(&argc, &argv); int world_size; MPI_Comm_size(MPI_COMM_WORLD, &world_size); int world_rank; MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); if (argc <= 3) { cout << "Error!"; exit(1); } int max_row, max_column, max_n; max_row = atoi(argv[1]); max_column = atoi(argv[2]); max_n = atoi(argv[3]); char **mat = (char**)malloc(sizeof(char*)*max_row); for (int i=0; i<max_row;i++) { mat[i]=(char*)malloc(sizeof(char)*max_column); } #pragma omp parallel for schedule(dynamic) for(int r = world_rank;r < max_row; ++r){ if (r % world_size != world_rank) { continue; } #pragma omp parallel for schedule(dynamic) for(int c = 0; c < max_column; ++c){ complex<float> z; int n = 0; while(abs(z) < 2 && ++n < max_n) z = (z * z) + decltype(z)( (float)c * 2 / max_column - 1.5, (float)r * 2 / max_row - 1 ); mat[r][c]=(n == max_n ? '#' : '.'); } } if (world_rank == 0) { for(int r = world_rank;r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ if (r % world_size == world_rank) { continue; } MPI_Recv(mat[r], max_column, MPI_CHAR, r % world_size, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } for(int r = 0; r < max_row; ++r){ for(int c = 0; c < max_column; ++c) std::cout << mat[r][c]; cout << '\n'; } } else { for(int r = world_rank;r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ if (r % world_size != world_rank) { continue; } MPI_Send(mat[r], max_column, MPI_CHAR, 0, 0, MPI_COMM_WORLD); } } } MPI_Finalize(); }
#include <complex> #include <iostream> #include <omp.h> #include <mpi.h> #include <cstdlib> using namespace std; int main(int argc, char **argv) { MPI_Init(&argc, &argv); int world_size; MPI_Comm_size(MPI_COMM_WORLD, &world_size); int world_rank; MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); if (argc <= 3) { cout << "Error!"; exit(1); } int max_row, max_column, max_n; max_row = atoi(argv[1]); max_column = atoi(argv[2]); max_n = atoi(argv[3]); char **mat = (char **)malloc(sizeof(char *) * max_row); for (int i = 0; i < max_row; i++) { mat[i] = (char *)malloc(sizeof(char) * max_column); } for (int r = world_rank; r < max_row; ++r) { if (r % world_size != world_rank) { continue; } for (int c = 0; c < max_column; ++c) { complex<float> z; int n = 0; while (abs(z) < 2 && ++n < max_n) z = (z * z) + decltype(z)( (float)c * 2 / max_column - 1.5, (float)r * 2 / max_row - 1 ); mat[r][c] = (n == max_n ? '#' : '.'); } } if (world_rank == 0) { for (int r = world_rank; r < max_row; ++r) { for (int c = 0; c < max_column; ++c) { if (r % world_size == world_rank) { continue; } MPI_Recv(mat[r], max_column, MPI_CHAR, r % world_size, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } for (int r = 0; r < max_row; ++r) { for (int c = 0; c < max_column; ++c) std::cout << mat[r][c]; cout << '\n'; } } else { for (int r = world_rank; r < max_row; ++r) { for (int c = 0; c < max_column; ++c) { if (r % world_size != world_rank) { continue; } MPI_Send(mat[r], max_column, MPI_CHAR, 0, 0, MPI_COMM_WORLD); } } } MPI_Finalize(); }
#include <complex> #include <iostream> #include <omp.h> #include <mpi.h> #include <cstdlib> using namespace std; int main(int argc, char** argv){ MPI_Init(&argc, &argv); int world_size; MPI_Comm_size(MPI_COMM_WORLD, &world_size); int world_rank; MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); if (argc <= 3) { cout << "Error!"; exit(1); } int max_row, max_column, max_n; max_row = atoi(argv[1]); max_column = atoi(argv[2]); max_n = atoi(argv[3]); char **mat = (char**)malloc(sizeof(char*)*max_row); for (int i=0; i<max_row;i++) { mat[i]=(char*)malloc(sizeof(char)*max_column); } #pragma omp parallel for schedule(dynamic) for(int r = world_rank;r < max_row; ++r){ if (r % world_size != world_rank) { continue; } #pragma omp parallel for schedule(dynamic) for(int c = 0; c < max_column; ++c){ complex<float> z; int n = 0; while(abs(z) < 2 && ++n < max_n) z = (z * z) + decltype(z)( (float)c * 2 / max_column - 1.5, (float)r * 2 / max_row - 1 ); mat[r][c]=(n == max_n ? '#' : '.'); } } if (world_rank == 0) { for(int r = world_rank;r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ if (r % world_size == world_rank) { continue; } MPI_Recv(mat[r], max_column, MPI_CHAR, r % world_size, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } for(int r = 0; r < max_row; ++r){ for(int c = 0; c < max_column; ++c) std::cout << mat[r][c]; cout << '\n'; } } else { for(int r = world_rank;r < max_row; ++r){ for(int c = 0; c < max_column; ++c){ if (r % world_size != world_rank) { continue; } MPI_Send(mat[r], max_column, MPI_CHAR, 0, 0, MPI_COMM_WORLD); } } } MPI_Finalize(); }
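marlonbrot.c stripes rows across MPI ranks (rank r owns the rows with r % world_size == world_rank) and threads each rank's work with OpenMP, but the original nests a second parallel for on the column loop inside the row-level one; most runtimes serialize nested regions by default, so the inner pragma is usually redundant rather than harmful. A single-level C sketch of the same decomposition, with the escape-time kernel rewritten in C99 complex arithmetic and the MPI row exchange elided (mat, max_row, max_column, max_n and the rank/size names mirror the original):

#include <complex.h>

static int escape_count(int r, int c, int max_row, int max_column, int max_n)
{
    /* Same escape-time iteration as the original, in C99 complex arithmetic. */
    float complex z = 0.0f;
    float complex k = ((float)c * 2 / max_column - 1.5f)
                    + ((float)r * 2 / max_row - 1.0f) * I;
    int n = 0;
    while (cabsf(z) < 2 && ++n < max_n)
        z = z * z + k;
    return n;
}

static void fill_my_rows(char **mat, int max_row, int max_column, int max_n,
                         int world_rank, int world_size)
{
    for (int r = world_rank; r < max_row; r += world_size) { /* MPI level: cyclic rows */
#pragma omp parallel for schedule(dynamic)
        for (int c = 0; c < max_column; ++c)                 /* OpenMP level: columns  */
            mat[r][c] = (escape_count(r, c, max_row, max_column, max_n) == max_n)
                            ? '#' : '.';
    }
}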
residualbased_newton_raphson_contact_strategy.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "custom_utilities/process_factory_utility.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ProcessFactoryUtility::Pointer ProcessesListType; typedef std::size_t IndexType; /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that 
allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonContactStrategy() override = default; //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************// //***********************************************************************************// /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY // Auxiliar zero array const array_1d<double, 3> zero_array = ZeroVector(3); // Set to zero the weighted gap ModelPart& r_model_part = StrategyBaseType::GetModelPart(); NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); const bool frictional = r_model_part.Is(SLIP); // We predict contact pressure in case of contact problem if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); if (frictional) { VariableUtils().SetVectorVar(WEIGHTED_SLIP, zero_array, nodes_array); } // Compute the current gap ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // We predict a contact pressure ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const std::size_t step = r_process_info[STEP]; if (step == 1) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT); } } else { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1)); } } } // BaseType::Predict(); // NOTE: May cause problems in dynamics!!! // // // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated // ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); // // // We predict contact pressure in case of contact problem // if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { // VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); // // // Compute the current gap // ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // // // We predict a contact pressure // ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); // const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY]; // // // We iterate over the nodes // bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true; // // #pragma omp parallel for // for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { // auto it_node = nodes_array.begin() + i; // // const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); // // const double penalty = it_node->Has(INITIAL_PENALTY) ? 
it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter; // // if (current_gap < 0.0) { // it_node->Set(ACTIVE, true); // if (is_components) { // it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap; // } else { // const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL); // it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal; // } // } // } // } KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; BaseType::Initialize(); mFinalizeWasPerformed = false; // Initializing NL_ITERATION_NUMBER ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); r_process_info[NL_ITERATION_NUMBER] = 1; KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. */ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Add something if necessary return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); mFinalizeWasPerformed = false; } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; if (mFinalizeWasPerformed == false) { BaseType::FinalizeSolutionStep(); // To avoid compute twice the FinalizeSolutionStep mFinalizeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations // bool is_converged = BaseSolveSolutionStep(); // Direct solution bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); if (r_model_part.IsNot(INTERACTION)) { // We get the system TSystemMatrixType& A = *BaseType::mpA; TSystemVectorType& Dx = *BaseType::mpDx; TSystemVectorType& b = *BaseType::mpb; // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); int inner_iteration = 0; while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. 
INNER ITERATION: ") << inner_iteration;; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; r_process_info[INNER_LOOP_ITERATION] = inner_iteration; is_converged = BaseSolveSolutionStep(); // We check the convergence BaseType::mpConvergenceCriteria->SetEchoLevel(0); is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b); BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel); if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { // We compute the base loop r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1; is_converged = BaseSolveSolutionStep(); } if (mThisParameters["adaptative_strategy"].GetBool()) { if (!is_converged) { is_converged = AdaptativeStep(); } } return is_converged; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters // ADAPTATIVE STRATEGY PARAMETERS bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed ProcessesListType mpMyProcesses; /// The processes list ProcessesListType mpPostProcesses; /// The post processes list // OTHER PARAMETERS int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria ///@} ///@name Protected Operators ///@{ /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool BaseSolveSolutionStep() { KRATOS_TRY; // Pointers needed in the solution ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; //initializing the parameters of the Newton-Raphson cicle IndexType iteration_number = 1; r_process_info[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // We do a geometry check before solve the system for first time if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } // Function to perform the building and the solving phase. 
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { //initialisation of the convergence criteria BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb); if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } // Iteration Cicle... performed only for NonLinearProblems while (is_converged == false && iteration_number++<BaseType::mMaxIterationNumber) { //setting the number of iteration r_process_info[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); //call the linear system solver to find the correction mDx for the //it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) { if( BaseType::GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!! 
" << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; //std::cout << "mb is calculated" << std::endl; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0) MaxIterationsExceeded(); // Recalculate residual if needed // (note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; KRATOS_CATCH(""); } /** * @brief This method performs the adaptative step */ bool AdaptativeStep() { KRATOS_TRY; bool is_converged = false; // Plots a warning if the maximum number of iterations is exceeded if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl; if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl; ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later int split_number = 0; // We iterate until we reach the convergence or we split more than desired while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) { // Expliting time step as a way to try improve the convergence split_number += 1; double aux_delta_time, current_time; const double aux_time = SplitTimeStep(aux_delta_time, current_time); current_time += aux_delta_time; bool inside_the_split_is_converged = false; IndexType inner_iteration = 0; while (current_time <= aux_time) { inner_iteration += 1; r_process_info[STEP] += 1; if (inner_iteration == 1) { if (StrategyBaseType::MoveMeshFlag()) UnMoveMesh(); NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int 
i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; it_node->OverwriteSolutionStepData(1, 0); // it_node->OverwriteSolutionStepData(2, 1); } r_process_info.SetCurrentTime(current_time); // Sets the new (reduced) time FinalizeSolutionStep(); } else { NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) (nodes_array.begin() + i)->CloneSolutionStepData(); r_process_info.CloneSolutionStepInfo(); r_process_info.ClearHistory(r_model_part.GetBufferSize()); r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step } // We execute the processes before the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteInitializeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteInitializeSolutionStep(); // In order to initialize everything again BaseType::mInitializeWasPerformed = false; mFinalizeWasPerformed = false; // We repeat the solve with the new DELTA_TIME this->Initialize(); this->InitializeSolutionStep(); this->Predict(); inside_the_split_is_converged = BaseType::SolveSolutionStep(); this->FinalizeSolutionStep(); // We execute the processes after the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteFinalizeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteFinalizeSolutionStep(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteBeforeOutputStep(); if (mpPostProcesses != nullptr) mpPostProcesses->PrintOutput(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteAfterOutputStep(); current_time += aux_delta_time; } if (inside_the_split_is_converged) is_converged = true; } // Plots a warning if the maximum number of iterations and splits are exceeded if (is_converged == false) MaxIterationsAndSplitsExceeded(); // Restoring the original DELTA_TIME r_process_info[DELTA_TIME] = original_delta_time; return is_converged; KRATOS_CATCH(""); } /** * @brief Here the database is updated * @param A The LHS matrix * @param Dx The increment of the solution after solving the system * @param b The RHS vector * @param MoveMesh The flag that tells if the mesh should be moved */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { BaseType::UpdateDatabase(A,Dx,b,MoveMesh); // TODO: Add something if necessary } /** * @brief This method checks whether any element is inverted */ bool CheckGeometryInverted() { ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); bool inverted_element = false; ElementsArrayType& elements_array = r_model_part.Elements(); // NOT OMP for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) { auto it_elem = elements_array.begin() + i; auto& geom = it_elem->GetGeometry(); if (geom.DeterminantOfJacobian(0) < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(geom.DeterminantOfJacobian(0)) } return true; } // We now check the deformation gradient std::vector<Matrix> deformation_gradient_matrices; it_elem->GetValueOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info); for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) { const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]); if (det_f < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(det_f) } return true; } } } return
inverted_element; } /** * @brief Here the time step is split * @param AuxDeltaTime The new delta time to be considered * @param CurrentTime The current time * @return The destination time */ double SplitTimeStep( double& AuxDeltaTime, double& CurrentTime ) { KRATOS_TRY; const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME]; CurrentTime = aux_time - AuxDeltaTime; StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one AuxDeltaTime /= mThisParameters["split_factor"].GetDouble(); StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time CoutSplittingTime(AuxDeltaTime, aux_time); return aux_time; KRATOS_CATCH(""); } /** * This method moves back the mesh to the previous position */ void UnMoveMesh() { KRATOS_TRY; if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false) KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl; NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates(); noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1); } KRATOS_CATCH(""); } /** * @brief This method returns the default parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() { Parameters default_parameters = Parameters(R"( { "adaptative_strategy" : false, "split_factor" : 10.0, "max_number_splits" : 3, "inner_loop_iterations" : 5 })" ); return default_parameters; } /** * @brief This method prints information after solving the problem */ void CoutSolvingProblem() { if (mConvergenceCriteriaEchoLevel != 0) { std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl; } } /** * @brief This method prints information after splitting the time increment * @param AuxDeltaTime The new time step to be considered * @param AuxTime The destination time */ void CoutSplittingTime( const double AuxDeltaTime, const double AuxTime ) { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; std::cout.precision(4); std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl; std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl; std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl; std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the maximum
number of iterations */ void MaxIterationsExceeded() override { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the maximum number of iterations and splits */ void MaxIterationsAndSplitsExceeded() { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
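// Illustrative usage sketch (not part of the original header; the function name is an
// assumption made for the example): building the Parameters object consumed by the
// strategy constructors above. Only the four keys listed in GetDefaultParameters() are
// recognised; keys omitted here are filled in by ValidateAndAssignDefaults().
#include "includes/kratos_parameters.h"

inline Kratos::Parameters ExampleAdaptativeSettings()
{
    // Activate the adaptative strategy and use a milder split factor than the
    // default 10.0; "max_number_splits" (3) and "inner_loop_iterations" (5)
    // keep their default values.
    return Kratos::Parameters(R"({
        "adaptative_strategy" : true,
        "split_factor"        : 2.0
    })");
}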
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "custom_utilities/process_factory_utility.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ProcessFactoryUtility::Pointer ProcessesListType; typedef std::size_t IndexType; /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that 
allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonContactStrategy() override = default; //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************// //***********************************************************************************// /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY // Auxiliar zero array const array_1d<double, 3> zero_array = ZeroVector(3); // Set to zero the weighted gap ModelPart& r_model_part = StrategyBaseType::GetModelPart(); NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); const bool frictional = r_model_part.Is(SLIP); // We predict contact pressure in case of contact problem if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); if (frictional) { VariableUtils().SetVectorVar(WEIGHTED_SLIP, zero_array, nodes_array); } // Compute the current gap ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // We predict a contact pressure ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const std::size_t step = r_process_info[STEP]; if (step == 1) { for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT); } } else { for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1)); } } } // BaseType::Predict(); // NOTE: May cause problems in dynamics!!! // // // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated // ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); // // // We predict contact pressure in case of contact problem // if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { // VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); // // // Compute the current gap // ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // // // We predict a contact pressure // ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); // const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY]; // // // We iterate over the nodes // bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true; // // // for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { // auto it_node = nodes_array.begin() + i; // // const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); // // const double penalty = it_node->Has(INITIAL_PENALTY) ? 
it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter; // // if (current_gap < 0.0) { // it_node->Set(ACTIVE, true); // if (is_components) { // it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap; // } else { // const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL); // it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal; // } // } // } // } KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; BaseType::Initialize(); mFinalizeWasPerformed = false; // Initializing NL_ITERATION_NUMBER ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); r_process_info[NL_ITERATION_NUMBER] = 1; KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. */ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Add something if necessary return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); mFinalizeWasPerformed = false; } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; if (mFinalizeWasPerformed == false) { BaseType::FinalizeSolutionStep(); // To avoid compute twice the FinalizeSolutionStep mFinalizeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations // bool is_converged = BaseSolveSolutionStep(); // Direct solution bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); if (r_model_part.IsNot(INTERACTION)) { // We get the system TSystemMatrixType& A = *BaseType::mpA; TSystemVectorType& Dx = *BaseType::mpDx; TSystemVectorType& b = *BaseType::mpb; // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); int inner_iteration = 0; while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. 
INNER ITERATION: ") << inner_iteration;; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; r_process_info[INNER_LOOP_ITERATION] = inner_iteration; is_converged = BaseSolveSolutionStep(); // We check the convergence BaseType::mpConvergenceCriteria->SetEchoLevel(0); is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b); BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel); if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { // We compute the base loop r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1; is_converged = BaseSolveSolutionStep(); } if (mThisParameters["adaptative_strategy"].GetBool()) { if (!is_converged) { is_converged = AdaptativeStep(); } } return is_converged; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters // ADAPTATIVE STRATEGY PARAMETERS bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed ProcessesListType mpMyProcesses; /// The processes list ProcessesListType mpPostProcesses; /// The post processes list // OTHER PARAMETERS int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria ///@} ///@name Protected Operators ///@{ /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool BaseSolveSolutionStep() { KRATOS_TRY; // Pointers needed in the solution ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; //initializing the parameters of the Newton-Raphson cicle IndexType iteration_number = 1; r_process_info[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // We do a geometry check before solve the system for first time if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } // Function to perform the building and the solving phase. 
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before starting the computation return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { // Initialisation of the convergence criteria BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb); if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } // Iteration cycle... performed only for NonLinearProblems while (is_converged == false && iteration_number++ < BaseType::mMaxIterationNumber) { // Setting the iteration number r_process_info[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // Call the linear system solver to find the correction rDx; it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) { if( BaseType::GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!!
" << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; //std::cout << "mb is calculated" << std::endl; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0) MaxIterationsExceeded(); // Recalculate residual if needed // (note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; KRATOS_CATCH(""); } /** * @brief This method performs the adaptative step */ bool AdaptativeStep() { KRATOS_TRY; bool is_converged = false; // Plots a warning if the maximum number of iterations is exceeded if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl; if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl; ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later int split_number = 0; // We iterate until we reach the convergence or we split more than desired while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) { // Expliting time step as a way to try improve the convergence split_number += 1; double aux_delta_time, current_time; const double aux_time = SplitTimeStep(aux_delta_time, current_time); current_time += aux_delta_time; bool inside_the_split_is_converged = false; IndexType inner_iteration = 0; while (current_time <= aux_time) { inner_iteration += 1; r_process_info[STEP] += 1; if (inner_iteration == 1) { if (StrategyBaseType::MoveMeshFlag()) UnMoveMesh(); NodesArrayType& nodes_array = r_model_part.Nodes(); for(int i = 0; i < 
static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; it_node->OverwriteSolutionStepData(1, 0); // it_node->OverwriteSolutionStepData(2, 1); } r_process_info.SetCurrentTime(current_time); // Sets the new (reduced) time FinalizeSolutionStep(); } else { NodesArrayType& nodes_array = r_model_part.Nodes(); for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) (nodes_array.begin() + i)->CloneSolutionStepData(); r_process_info.CloneSolutionStepInfo(); r_process_info.ClearHistory(r_model_part.GetBufferSize()); r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step } // We execute the processes before the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteInitializeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteInitializeSolutionStep(); // In order to initialize everything again BaseType::mInitializeWasPerformed = false; mFinalizeWasPerformed = false; // We repeat the solve with the new DELTA_TIME this->Initialize(); this->InitializeSolutionStep(); this->Predict(); inside_the_split_is_converged = BaseType::SolveSolutionStep(); this->FinalizeSolutionStep(); // We execute the processes after the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteFinalizeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteFinalizeSolutionStep(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteBeforeOutputStep(); if (mpPostProcesses != nullptr) mpPostProcesses->PrintOutput(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteAfterOutputStep(); current_time += aux_delta_time; } if (inside_the_split_is_converged) is_converged = true; } // Plots a warning if the maximum number of iterations and splits are exceeded if (is_converged == false) MaxIterationsAndSplitsExceeded(); // Restoring the original DELTA_TIME r_process_info[DELTA_TIME] = original_delta_time; return is_converged; KRATOS_CATCH(""); } /** * @brief Here the database is updated * @param A The LHS matrix * @param Dx The increment of the solution after solving the system * @param b The RHS vector * @param MoveMesh The flag that tells if the mesh should be moved */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { BaseType::UpdateDatabase(A,Dx,b,MoveMesh); // TODO: Add something if necessary } /** * @brief This method checks whether any element is inverted */ bool CheckGeometryInverted() { ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); bool inverted_element = false; ElementsArrayType& elements_array = r_model_part.Elements(); // NOT OMP for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) { auto it_elem = elements_array.begin() + i; auto& geom = it_elem->GetGeometry(); if (geom.DeterminantOfJacobian(0) < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(geom.DeterminantOfJacobian(0)) } return true; } // We now check the deformation gradient std::vector<Matrix> deformation_gradient_matrices; it_elem->GetValueOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info); for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) { const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]); if (det_f < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(det_f) } return true; } } } return inverted_element; } /** * @brief Here the time
step is split * @param AuxDeltaTime The new delta time to be considered * @param CurrentTime The current time * @return The destination time */ double SplitTimeStep( double& AuxDeltaTime, double& CurrentTime ) { KRATOS_TRY; const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME]; CurrentTime = aux_time - AuxDeltaTime; StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one AuxDeltaTime /= mThisParameters["split_factor"].GetDouble(); StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time CoutSplittingTime(AuxDeltaTime, aux_time); return aux_time; KRATOS_CATCH(""); } /** * This method moves back the mesh to the previous position */ void UnMoveMesh() { KRATOS_TRY; if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false) KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl; NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates(); noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1); } KRATOS_CATCH(""); } /** * @brief This method returns the default parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() { Parameters default_parameters = Parameters(R"( { "adaptative_strategy" : false, "split_factor" : 10.0, "max_number_splits" : 3, "inner_loop_iterations" : 5 })" ); return default_parameters; } /** * @brief This method prints information after solving the problem */ void CoutSolvingProblem() { if (mConvergenceCriteriaEchoLevel != 0) { std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl; } } /** * @brief This method prints information after splitting the time increment * @param AuxDeltaTime The new time step to be considered * @param AuxTime The destination time */ void CoutSplittingTime( const double AuxDeltaTime, const double AuxTime ) { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; std::cout.precision(4); std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl; std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl; std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl; std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the maximum number of iterations */ void MaxIterationsExceeded() override { if
(mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the maximum number of iterations and splits */ void MaxIterationsAndSplitsExceeded() { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
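// Illustrative sketch (not part of the original header; all names below are assumptions
// made for the example): the arithmetic performed by SplitTimeStep() above, extracted
// into a free function so the numbers are easy to follow. With TIME = 1.0,
// DELTA_TIME = 0.1 and a "split_factor" of 10.0 the strategy comes back to t = 0.9 and
// re-advances towards t = 1.0 in sub-steps of roughly 0.01.
inline double ExampleSplitTimeStep(
    double& rAuxDeltaTime,  // in: old delta time, out: reduced delta time
    double& rCurrentTime,   // out: the time the strategy steps back to
    const double Time,      // TIME as read from the process info
    const double SplitFactor)
{
    const double aux_time = Time;            // destination time (returned)
    rCurrentTime = aux_time - rAuxDeltaTime; // come back to the previous time
    rAuxDeltaTime /= SplitFactor;            // reduced time step for the sub-stepping
    return aux_time;
}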
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "custom_utilities/process_factory_utility.h" #include "custom_utilities/contact_utilities.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ProcessFactoryUtility::Pointer ProcessesListType; typedef std::size_t IndexType; /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that 
allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param p_scheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ ResidualBasedNewtonRaphsonContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer p_scheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})"), ProcessesListType pMyProcesses = nullptr, ProcessesListType pPostProcesses = nullptr ) : ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, p_scheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters), mpMyProcesses(pMyProcesses), mpPostProcesses(pPostProcesses) { KRATOS_TRY; mConvergenceCriteriaEchoLevel = pNewConvergenceCriteria->GetEchoLevel(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonContactStrategy() override = default; //******************** OPERATIONS ACCESSIBLE FROM THE INPUT: ************************// //***********************************************************************************// /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the * values of the solution step of interest are assumed equal to the old values */ void Predict() override { KRATOS_TRY // Auxiliar zero array const array_1d<double, 3> zero_array = ZeroVector(3); // Set to zero the weighted gap ModelPart& r_model_part = StrategyBaseType::GetModelPart(); NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); const bool frictional = r_model_part.Is(SLIP); // We predict contact pressure in case of contact problem if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); if (frictional) { VariableUtils().SetVectorVar(WEIGHTED_SLIP, zero_array, nodes_array); } // Compute the current gap ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // We predict a contact pressure ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const std::size_t step = r_process_info[STEP]; if (step == 1) { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT); } } else { #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) += (it_node->FastGetSolutionStepValue(DISPLACEMENT) - it_node->FastGetSolutionStepValue(DISPLACEMENT, 1)); } } } // BaseType::Predict(); // NOTE: May cause problems in dynamics!!! // // // Set to zero the weighted gap // NOTE: This can be done during the search if the predict is deactivated // ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // NodesArrayType& nodes_array = r_model_part.GetSubModelPart("Contact").Nodes(); // // // We predict contact pressure in case of contact problem // if (nodes_array.begin()->SolutionStepsDataHas(WEIGHTED_GAP)) { // VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, nodes_array); // // // Compute the current gap // ContactUtilities::ComputeExplicitContributionConditions(r_model_part.GetSubModelPart("ComputingContact")); // // // We predict a contact pressure // ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); // const double initial_penalty_parameter = r_process_info[INITIAL_PENALTY]; // // // We iterate over the nodes // bool is_components = nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) ? false : true; // // #pragma omp parallel for // for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { // auto it_node = nodes_array.begin() + i; // // const double current_gap = it_node->FastGetSolutionStepValue(WEIGHTED_GAP); // // const double penalty = it_node->Has(INITIAL_PENALTY) ? 
it_node->GetValue(INITIAL_PENALTY) : initial_penalty_parameter; // // if (current_gap < 0.0) { // it_node->Set(ACTIVE, true); // if (is_components) { // it_node->FastGetSolutionStepValue(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE) = penalty * current_gap; // } else { // const array_1d<double, 3>& normal = it_node->FastGetSolutionStepValue(NORMAL); // it_node->FastGetSolutionStepValue(VECTOR_LAGRANGE_MULTIPLIER) = penalty * current_gap * normal; // } // } // } // } KRATOS_CATCH("") } /** * @brief Initialization of member variables and prior operations */ void Initialize() override { KRATOS_TRY; BaseType::Initialize(); mFinalizeWasPerformed = false; // Initializing NL_ITERATION_NUMBER ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); r_process_info[NL_ITERATION_NUMBER] = 1; KRATOS_CATCH(""); } /** * @brief The problem of interest is solved. * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(), * SolveSolutionStep() and FinalizeSolutionStep(). * All those functions can otherwise be called separately. */ double Solve() override { this->Initialize(); this->InitializeSolutionStep(); this->Predict(); this->SolveSolutionStep(); this->FinalizeSolutionStep(); // TODO: Add something if necessary return 0.0; } /** * @brief Performs all the required operations that should be done (for each step) * before solving the solution step. * @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { BaseType::InitializeSolutionStep(); mFinalizeWasPerformed = false; } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; if (mFinalizeWasPerformed == false) { BaseType::FinalizeSolutionStep(); // To avoid compute twice the FinalizeSolutionStep mFinalizeWasPerformed = true; } KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; // bool is_converged = BaseType::SolveSolutionStep(); // FIXME: Requires to separate the non linear iterations // bool is_converged = BaseSolveSolutionStep(); // Direct solution bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); if (r_model_part.IsNot(INTERACTION)) { // We get the system TSystemMatrixType& A = *BaseType::mpA; TSystemVectorType& Dx = *BaseType::mpDx; TSystemVectorType& b = *BaseType::mpb; // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); int inner_iteration = 0; while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << std::endl << BOLDFONT("Simplified semi-smooth strategy. 
INNER ITERATION: ") << inner_iteration;; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; r_process_info[INNER_LOOP_ITERATION] = inner_iteration; is_converged = BaseSolveSolutionStep(); // We check the convergence BaseType::mpConvergenceCriteria->SetEchoLevel(0); is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), A, Dx, b); BaseType::mpConvergenceCriteria->SetEchoLevel(mConvergenceCriteriaEchoLevel); if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { if (is_converged) std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else std::cout << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { // We compute the base loop r_model_part.GetProcessInfo()[INNER_LOOP_ITERATION] = 1; is_converged = BaseSolveSolutionStep(); } if (mThisParameters["adaptative_strategy"].GetBool()) { if (!is_converged) { is_converged = AdaptativeStep(); } } return is_converged; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters // ADAPTATIVE STRATEGY PARAMETERS bool mFinalizeWasPerformed; /// If the FinalizeSolutionStep has been already permformed ProcessesListType mpMyProcesses; /// The processes list ProcessesListType mpPostProcesses; /// The post processes list // OTHER PARAMETERS int mConvergenceCriteriaEchoLevel; /// The echo level of the convergence criteria ///@} ///@name Protected Operators ///@{ /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool BaseSolveSolutionStep() { KRATOS_TRY; // Pointers needed in the solution ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); typename TSchemeType::Pointer p_scheme = BaseType::GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; //initializing the parameters of the Newton-Raphson cicle IndexType iteration_number = 1; r_process_info[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // We do a geometry check before solve the system for first time if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT BEFORE FIRST SOLVE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } // Function to perform the building and the solving phase. 
if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before starting the computation return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); if (is_converged) { // Initialisation of the convergence criteria BaseType::mpConvergenceCriteria->InitializeSolutionStep(r_model_part, r_dof_set, rA, rDx, rb); if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } // Iteration cycle... performed only for NonLinearProblems while (is_converged == false && iteration_number++ < BaseType::mMaxIterationNumber) { // Setting the iteration number r_process_info[NL_ITERATION_NUMBER] = iteration_number; p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, r_dof_set, rA, rDx, rb); // Call the linear system solver to find the correction rDx; it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || StrategyBaseType::mStiffnessMatrixIsBuilt == false ) { if( BaseType::GetKeepSystemConstantDuringIterations() == false) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("No DoFs") << "ATTENTION: no free DOFs!!
" << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); // We now check the geometry if (mThisParameters["adaptative_strategy"].GetBool()) { if (CheckGeometryInverted()) { KRATOS_WARNING("Element inverted") << "INVERTED ELEMENT DURING DATABASE UPDATE" << std::endl; r_process_info[STEP] -= 1; // We revert one step in the case that the geometry is already broken before start the computing return false; } } p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; //std::cout << "mb is calculated" << std::endl; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, r_dof_set, rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber && r_model_part.GetCommunicator().MyPID() == 0) MaxIterationsExceeded(); // Recalculate residual if needed // (note that some convergence criteria need it to be recalculated) if (residual_is_updated == false) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. // TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; KRATOS_CATCH(""); } /** * @brief This method performs the adaptative step */ bool AdaptativeStep() { KRATOS_TRY; bool is_converged = false; // Plots a warning if the maximum number of iterations is exceeded if (mpMyProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python processes") << "If you have not implemented any method to recalculate BC or loads in function of time, this strategy will be USELESS" << std::endl; if (mpPostProcesses == nullptr && StrategyBaseType::mEchoLevel > 0) KRATOS_WARNING("No python post processes") << "If you don't add the postprocesses and the time step if splitted you won't postprocess that steps" << std::endl; ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); const double original_delta_time = r_process_info[DELTA_TIME]; // We save the delta time to restore later int split_number = 0; // We iterate until we reach the convergence or we split more than desired while (is_converged == false && split_number <= mThisParameters["max_number_splits"].GetInt()) { // Expliting time step as a way to try improve the convergence split_number += 1; double aux_delta_time, current_time; const double aux_time = SplitTimeStep(aux_delta_time, current_time); current_time += aux_delta_time; bool inside_the_split_is_converged = false; IndexType inner_iteration = 0; while (current_time <= aux_time) { inner_iteration += 1; r_process_info[STEP] += 1; if (inner_iteration == 1) { if (StrategyBaseType::MoveMeshFlag()) UnMoveMesh(); NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int 
i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; it_node->OverwriteSolutionStepData(1, 0); // it_node->OverwriteSolutionStepData(2, 1); } r_process_info.SetCurrentTime(current_time); // Reduces the time step FinalizeSolutionStep(); } else { NodesArrayType& nodes_array = r_model_part.Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) (nodes_array.begin() + i)->CloneSolutionStepData(); r_process_info.CloneSolutionStepInfo(); r_process_info.ClearHistory(r_model_part.GetBufferSize()); r_process_info.SetAsTimeStepInfo(current_time); // Sets the new time step } // We execute the processes before the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteInitializeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteInitializeSolutionStep(); // In order to initialize everything again BaseType::mInitializeWasPerformed = false; mFinalizeWasPerformed = false; // We repeat the solve with the new DELTA_TIME this->Initialize(); this->InitializeSolutionStep(); this->Predict(); inside_the_split_is_converged = BaseType::SolveSolutionStep(); this->FinalizeSolutionStep(); // We execute the processes after the non-linear iteration if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteFinalizeSolutionStep(); if (mpPostProcesses != nullptr) mpPostProcesses->ExecuteFinalizeSolutionStep(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteBeforeOutputStep(); if (mpPostProcesses != nullptr) mpPostProcesses->PrintOutput(); if (mpMyProcesses != nullptr) mpMyProcesses->ExecuteAfterOutputStep(); current_time += aux_delta_time; } if (inside_the_split_is_converged) is_converged = true; } // Plots a warning if the maximum number of iterations and splits is exceeded if (is_converged == false) MaxIterationsAndSplitsExceeded(); // Restoring the original DELTA_TIME r_process_info[DELTA_TIME] = original_delta_time; return is_converged; KRATOS_CATCH(""); } /** * @brief Here the database is updated * @param A The LHS matrix * @param Dx The increment of the solution after solving the system * @param b The RHS vector * @param MoveMesh The flag that tells if the mesh should be moved */ void UpdateDatabase( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, const bool MoveMesh ) override { BaseType::UpdateDatabase(A,Dx,b,MoveMesh); // TODO: Add something if necessary } /** * @brief This method checks whether any element is inverted */ bool CheckGeometryInverted() { ModelPart& r_model_part = StrategyBaseType::GetModelPart(); ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); bool inverted_element = false; ElementsArrayType& elements_array = r_model_part.Elements(); // NOT OMP for(int i = 0; i < static_cast<int>(elements_array.size()); ++i) { auto it_elem = elements_array.begin() + i; auto& geom = it_elem->GetGeometry(); if (geom.DeterminantOfJacobian(0) < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(geom.DeterminantOfJacobian(0)) } return true; } // We now check the deformation gradient std::vector<Matrix> deformation_gradient_matrices; it_elem->GetValueOnIntegrationPoints( DEFORMATION_GRADIENT, deformation_gradient_matrices, r_process_info); for (IndexType i_gp = 0; i_gp < deformation_gradient_matrices.size(); ++i_gp) { const double det_f = MathUtils<double>::DetMat(deformation_gradient_matrices[i_gp]); if (det_f < 0.0) { if (mConvergenceCriteriaEchoLevel > 0) { KRATOS_WATCH(it_elem->Id()) KRATOS_WATCH(det_f) } return true; } } } return
inverted_element; } /** * @brief Here the time step is split * @param AuxDeltaTime The new delta time to be considered * @param CurrentTime The current time * @return The destination time */ double SplitTimeStep( double& AuxDeltaTime, double& CurrentTime ) { KRATOS_TRY; const double aux_time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; AuxDeltaTime = StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME]; CurrentTime = aux_time - AuxDeltaTime; StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] = CurrentTime; // Restore time to the previous one AuxDeltaTime /= mThisParameters["split_factor"].GetDouble(); StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] = AuxDeltaTime; // Change delta time CoutSplittingTime(AuxDeltaTime, aux_time); return aux_time; KRATOS_CATCH(""); } /** * This method moves back the mesh to the previous position */ void UnMoveMesh() { KRATOS_TRY; if (StrategyBaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(DISPLACEMENT_X) == false) KRATOS_ERROR << "It is impossible to move the mesh since the DISPLACEMENT var is not in the model_part. Either use SetMoveMeshFlag(False) or add DISPLACEMENT to the list of variables" << std::endl; NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes(); #pragma omp parallel for for(int i = 0; i < static_cast<int>(nodes_array.size()); ++i) { auto it_node = nodes_array.begin() + i; noalias(it_node->Coordinates()) = it_node->GetInitialPosition().Coordinates(); noalias(it_node->Coordinates()) += it_node->FastGetSolutionStepValue(DISPLACEMENT, 1); } KRATOS_CATCH(""); } /** * @brief This method returns the default parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() { Parameters default_parameters = Parameters(R"( { "adaptative_strategy" : false, "split_factor" : 10.0, "max_number_splits" : 3, "inner_loop_iterations" : 5 })" ); return default_parameters; } /** * @brief This method prints information after solving the problem */ void CoutSolvingProblem() { if (mConvergenceCriteriaEchoLevel != 0) { std::cout << "STEP: " << StrategyBaseType::GetModelPart().GetProcessInfo()[STEP] << "\t NON LINEAR ITERATION: " << StrategyBaseType::GetModelPart().GetProcessInfo()[NL_ITERATION_NUMBER] << "\t TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[TIME] << "\t DELTA TIME: " << StrategyBaseType::GetModelPart().GetProcessInfo()[DELTA_TIME] << std::endl; } } /** * @brief This method prints information after splitting the time increment * @param AuxDeltaTime The new time step to be considered * @param AuxTime The destination time */ void CoutSplittingTime( const double AuxDeltaTime, const double AuxTime ) { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { const double Time = StrategyBaseType::GetModelPart().GetProcessInfo()[TIME]; std::cout.precision(4); std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT("SPLITTING TIME STEP") << " |" << std::endl; std::cout << "| " << BOLDFONT("COMING BACK TO TIME: ") << std::scientific << Time << " |" << std::endl; std::cout << "| " << BOLDFONT(" NEW TIME STEP: ") << std::scientific << AuxDeltaTime << " |" << std::endl; std::cout << "| " << BOLDFONT(" UNTIL TIME: ") << std::scientific << AuxTime << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the max
number of iterations */ void MaxIterationsExceeded() override { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } /** * @brief This method prints information after reaching the max number of iterations and splits */ void MaxIterationsAndSplitsExceeded() { if (mConvergenceCriteriaEchoLevel > 0 && StrategyBaseType::GetModelPart().GetCommunicator().MyPID() == 0 ) { std::cout << "|----------------------------------------------------|" << std::endl; std::cout << "| " << BOLDFONT(FRED("ATTENTION: Max iterations exceeded")) << " |" << std::endl; std::cout << "| " << BOLDFONT(FRED(" Max number of splits exceeded ")) << " |" << std::endl; std::cout << "|----------------------------------------------------|" << std::endl; } } ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonContactStrategy(const ResidualBasedNewtonRaphsonContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Serialization ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_CONTACT_STRATEGY */
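The adaptive step above recovers from a failed solve by rewinding TIME by one DELTA_TIME and re-marching the same interval with DELTA_TIME divided by "split_factor". A minimal standalone sketch of that arithmetic, in plain C with hypothetical names and without the Kratos ProcessInfo machinery:

#include <stdio.h>

/* Hypothetical stand-in for SplitTimeStep(): rewind one step and
 * subdivide it; split_factor mirrors the "split_factor" default (10.0)
 * in GetDefaultParameters() above. */
static double split_time_step(double *time, double *delta_time, double split_factor)
{
    const double aux_time = *time;  /* destination time: end of the failed step */
    *time -= *delta_time;           /* come back to the last converged time */
    *delta_time /= split_factor;    /* march again with smaller sub-steps */
    return aux_time;
}

int main(void)
{
    double time = 1.0, dt = 0.1;
    const double until = split_time_step(&time, &dt, 10.0);
    double t;
    /* Sub-stepping loop, in the same spirit as the inner while-loop above. */
    for (t = time + dt; t <= until; t += dt)
        printf("sub-step at t = %g (dt = %g)\n", t, dt);
    return 0;
}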
symm_x_dia_n_hi_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number* Y = &y[index2(cc,0,ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i],Y[i],beta); const ALPHA_Number* X = &x[index2(cc,0,ldx)]; for(ALPHA_INT di = 0; di < mat->ndiag;++di){ ALPHA_INT d = mat->distance[di]; if(d > 0){ ALPHA_INT ars = alpha_max(0,-d); ALPHA_INT acs = alpha_max(0,d); ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs); for(ALPHA_INT i = 0; i < an; ++i){ ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Number val; alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha); alpha_madde(Y[ar],val,X[ac]); alpha_madde(Y[ac],val,X[ar]); } } if(d == 0){ for(ALPHA_INT r = 0; r < mat->rows; ++r){ ALPHA_Number val; alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha); alpha_madde(Y[r],val,X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number *Y = &y[index2(cc, 0, ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i], Y[i], beta); const ALPHA_Number *X = &x[index2(cc, 0, ldx)]; for (ALPHA_INT di = 0; di < mat->ndiag; ++di) { ALPHA_INT d = mat->distance[di]; if (d > 0) { ALPHA_INT ars = alpha_max(0, -d); ALPHA_INT acs = alpha_max(0, d); ALPHA_INT an = alpha_min(mat->rows - ars, mat->cols - acs); for (ALPHA_INT i = 0; i < an; ++i) { ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Number val; alpha_mul(val, mat->values[index2(di, ar, mat->lval)], alpha); alpha_madde(Y[ar], val, X[ac]); alpha_madde(Y[ac], val, X[ar]); } } if (d == 0) { for (ALPHA_INT r = 0; r < mat->rows; ++r) { ALPHA_Number val; alpha_mul(val, mat->values[index2(di, r, mat->lval)], alpha); alpha_madde(Y[r], val, X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA * mat, const ALPHA_Number * x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number * y, const ALPHA_INT ldy) { ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { ALPHA_Number *Y = &y[index2(cc, 0, ldy)]; for (ALPHA_INT i = 0; i < mat->rows; i++) alpha_mul(Y[i], Y[i], beta); const ALPHA_Number *X = &x[index2(cc, 0, ldx)]; for (ALPHA_INT di = 0; di < mat->ndiag; ++di) { ALPHA_INT d = mat->distance[di]; if (d > 0) { ALPHA_INT ars = alpha_max(0, -d); ALPHA_INT acs = alpha_max(0, d); ALPHA_INT an = alpha_min(mat->rows - ars, mat->cols - acs); for (ALPHA_INT i = 0; i < an; ++i) { ALPHA_INT ar = ars + i; ALPHA_INT ac = acs + i; ALPHA_Number val; alpha_mul(val, mat->values[index2(di, ar, mat->lval)], alpha); alpha_madde(Y[ar], val, X[ac]); alpha_madde(Y[ac], val, X[ar]); } } if (d == 0) { for (ALPHA_INT r = 0; r < mat->rows; ++r) { ALPHA_Number val; alpha_mul(val, mat->values[index2(di, r, mat->lval)], alpha); alpha_madde(Y[r], val, X[r]); } } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
nbnxn_kernel_simd_4xn.c
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 2012,2013, by the GROMACS development team, led by * David van der Spoel, Berk Hess, Erik Lindahl, and including many * others, as listed in the AUTHORS file in the top-level source * directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ /* * Note: this file was generated by the Verlet kernel generator for * kernel type 4xn. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "typedefs.h" #ifdef GMX_NBNXN_SIMD_4XN #ifdef GMX_NBNXN_HALF_WIDTH_SIMD #define GMX_USE_HALF_WIDTH_SIMD_HERE #endif #include "gmx_simd_macros.h" #include "gmx_simd_vec.h" #if !(GMX_SIMD_WIDTH_HERE == 2 || GMX_SIMD_WIDTH_HERE == 4 || GMX_SIMD_WIDTH_HERE == 8) #error "unsupported SIMD width" #endif #define GMX_SIMD_J_UNROLL_SIZE 1 #include "nbnxn_kernel_simd_4xn.h" #include "../nbnxn_kernel_common.h" #include "gmx_omp_nthreads.h" #include "types/force_flags.h" /*! \brief Kinds of electrostatic treatments in SIMD Verlet kernels */ enum { coultRF, coultTAB, coultTAB_TWIN, coultEWALD, coultEWALD_TWIN, coultNR }; /* Declare and define the kernel function pointer lookup tables. 
*/ static p_nbk_func_ener p_nbk_ener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_ener, nbnxn_kernel_simd_4xn_rf_comb_lb_ener, nbnxn_kernel_simd_4xn_rf_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_ener, }, }; static p_nbk_func_ener p_nbk_energrp[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_energrp, nbnxn_kernel_simd_4xn_rf_comb_lb_energrp, nbnxn_kernel_simd_4xn_rf_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_energrp, }, }; static p_nbk_func_noener p_nbk_noener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_noener, nbnxn_kernel_simd_4xn_rf_comb_lb_noener, nbnxn_kernel_simd_4xn_rf_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_noener, }, }; static void reduce_group_energies(int ng, int ng_2log, const real *VSvdw, const real *VSc, real *Vvdw, real *Vc) { const int unrollj = GMX_SIMD_WIDTH_HERE/GMX_SIMD_J_UNROLL_SIZE; const int unrollj_half = unrollj/2; int ng_p2, i, j, j0, j1, c, s; ng_p2 = (1<<ng_2log); /* The size of the x86 SIMD energy group buffer array is: * ng*ng*ng_p2*unrollj_half*simd_width */ for (i = 0; i < ng; i++) { for (j = 0; j < ng; j++) { Vvdw[i*ng+j] = 0; Vc[i*ng+j] = 0; } for (j1 = 0; j1 < ng; j1++) { for (j0 = 0; j0 < ng; j0++) { c = ((i*ng + j1)*ng_p2 + j0)*unrollj_half*unrollj; for (s = 0; s < unrollj_half; s++) { Vvdw[i*ng+j0] += VSvdw[c+0]; Vvdw[i*ng+j1] += VSvdw[c+1]; Vc [i*ng+j0] += VSc [c+0]; Vc [i*ng+j1] += VSc [c+1]; c += unrollj + 2; } } } } } #else /* GMX_NBNXN_SIMD_4XN */ #include "gmx_fatal.h" #endif /* GMX_NBNXN_SIMD_4XN */ void nbnxn_kernel_simd_4xn(nbnxn_pairlist_set_t *nbl_list, const nbnxn_atomdata_t *nbat, const interaction_const_t *ic, int ewald_excl, rvec *shift_vec, int force_flags, int clearF, real *fshift, real *Vc, real *Vvdw) #ifdef GMX_NBNXN_SIMD_4XN { int nnbl; nbnxn_pairlist_t **nbl; int coult; int nb; nnbl = nbl_list->nnbl; nbl = nbl_list->nbl; if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT) { coult = coultRF; 
} else { if (ewald_excl == ewaldexclTable) { if (ic->rcoulomb == ic->rvdw) { coult = coultTAB; } else { coult = coultTAB_TWIN; } } else { if (ic->rcoulomb == ic->rvdw) { coult = coultEWALD; } else { coult = coultEWALD_TWIN; } } } #pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded)) for (nb = 0; nb < nnbl; nb++) { nbnxn_atomdata_output_t *out; real *fshift_p; out = &nbat->out[nb]; if (clearF == enbvClearFYes) { clear_f(nbat, nb, out->f); } if ((force_flags & GMX_FORCE_VIRIAL) && nnbl == 1) { fshift_p = fshift; } else { fshift_p = out->fshift; if (clearF == enbvClearFYes) { clear_fshift(fshift_p); } } if (!(force_flags & GMX_FORCE_ENERGY)) { /* Don't calculate energies */ p_nbk_noener[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p); } else if (out->nV == 1) { /* No energy groups */ out->Vvdw[0] = 0; out->Vc[0] = 0; p_nbk_ener[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->Vvdw, out->Vc); } else { /* Calculate energy group contributions */ int i; for (i = 0; i < out->nVS; i++) { out->VSvdw[i] = 0; } for (i = 0; i < out->nVS; i++) { out->VSc[i] = 0; } p_nbk_energrp[coult][nbat->comb_rule](nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->VSvdw, out->VSc); reduce_group_energies(nbat->nenergrp, nbat->neg_2log, out->VSvdw, out->VSc, out->Vvdw, out->Vc); } } if (force_flags & GMX_FORCE_ENERGY) { reduce_energies_over_lists(nbat, nnbl, Vvdw, Vc); } } #else { gmx_incons("nbnxn_kernel_simd_4xn called when such kernels " " are not enabled."); } #endif #undef GMX_SIMD_J_UNROLL_SIZE
/* * Note: this file was generated by the Verlet kernel generator for kernel * type 4xn. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "typedefs.h" #ifdef GMX_NBNXN_SIMD_4XN #ifdef GMX_NBNXN_HALF_WIDTH_SIMD #define GMX_USE_HALF_WIDTH_SIMD_HERE #endif #include "gmx_simd_macros.h" #include "gmx_simd_vec.h" #if !(GMX_SIMD_WIDTH_HERE == 2 || GMX_SIMD_WIDTH_HERE == 4 || GMX_SIMD_WIDTH_HERE == 8) #error "unsupported SIMD width" #endif #define GMX_SIMD_J_UNROLL_SIZE 1 #include "nbnxn_kernel_simd_4xn.h" #include "../nbnxn_kernel_common.h" #include "gmx_omp_nthreads.h" #include "types/force_flags.h" /* * ! \brief Kinds of electrostatic treatments in SIMD Verlet kernels */ enum { coultRF, coultTAB, coultTAB_TWIN, coultEWALD, coultEWALD_TWIN, coultNR }; /* Declare and define the kernel function pointer lookup tables. */ static p_nbk_func_ener p_nbk_ener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_ener, nbnxn_kernel_simd_4xn_rf_comb_lb_ener, nbnxn_kernel_simd_4xn_rf_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_ener, }, }; static p_nbk_func_ener p_nbk_energrp[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_energrp, nbnxn_kernel_simd_4xn_rf_comb_lb_energrp, nbnxn_kernel_simd_4xn_rf_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_energrp, }, }; static p_nbk_func_noener p_nbk_noener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_noener, nbnxn_kernel_simd_4xn_rf_comb_lb_noener, nbnxn_kernel_simd_4xn_rf_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_noener, }, }; static void reduce_group_energies(int ng, int ng_2log, const real * VSvdw, const real * VSc, real * Vvdw, real * Vc) { const int unrollj = GMX_SIMD_WIDTH_HERE / GMX_SIMD_J_UNROLL_SIZE; const int unrollj_half = unrollj / 2; int ng_p2, i, j, j0, j1, c, s; ng_p2 = (1 << ng_2log); /* * The size of the x86 SIMD energy group buffer array is: * ng*ng*ng_p2*unrollj_half*simd_width */ for (i = 0; i < ng; i++) { for 
(j = 0; j < ng; j++) { Vvdw[i * ng + j] = 0; Vc[i * ng + j] = 0; } for (j1 = 0; j1 < ng; j1++) { for (j0 = 0; j0 < ng; j0++) { c = ((i * ng + j1) * ng_p2 + j0) * unrollj_half * unrollj; for (s = 0; s < unrollj_half; s++) { Vvdw[i * ng + j0] += VSvdw[c + 0]; Vvdw[i * ng + j1] += VSvdw[c + 1]; Vc[i * ng + j0] += VSc[c + 0]; Vc[i * ng + j1] += VSc[c + 1]; c += unrollj + 2; } } } } } #else /* GMX_NBNXN_SIMD_4XN */ #include "gmx_fatal.h" #endif /* GMX_NBNXN_SIMD_4XN */ void nbnxn_kernel_simd_4xn(nbnxn_pairlist_set_t * nbl_list, const nbnxn_atomdata_t * nbat, const interaction_const_t * ic, int ewald_excl, rvec * shift_vec, int force_flags, int clearF, real * fshift, real * Vc, real * Vvdw) #ifdef GMX_NBNXN_SIMD_4XN { int nnbl; nbnxn_pairlist_t **nbl; int coult; int nb; nnbl = nbl_list->nnbl; nbl = nbl_list->nbl; if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT) { coult = coultRF; } else { if (ewald_excl == ewaldexclTable) { if (ic->rcoulomb == ic->rvdw) { coult = coultTAB; } else { coult = coultTAB_TWIN; } } else { if (ic->rcoulomb == ic->rvdw) { coult = coultEWALD; } else { coult = coultEWALD_TWIN; } } } for (nb = 0; nb < nnbl; nb++) { nbnxn_atomdata_output_t *out; real *fshift_p; out = &nbat->out[nb]; if (clearF == enbvClearFYes) { clear_f(nbat, nb, out->f); } if ((force_flags & GMX_FORCE_VIRIAL) && nnbl == 1) { fshift_p = fshift; } else { fshift_p = out->fshift; if (clearF == enbvClearFYes) { clear_fshift(fshift_p); } } if (!(force_flags & GMX_FORCE_ENERGY)) { /* Don't calculate energies */ p_nbk_noener[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p); } else if (out->nV == 1) { /* No energy groups */ out->Vvdw[0] = 0; out->Vc[0] = 0; p_nbk_ener[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->Vvdw, out->Vc); } else { /* Calculate energy group contributions */ int i; for (i = 0; i < out->nVS; i++) { out->VSvdw[i] = 0; } for (i = 0; i < out->nVS; i++) { out->VSc[i] = 0; } p_nbk_energrp[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->VSvdw, out->VSc); reduce_group_energies(nbat->nenergrp, nbat->neg_2log, out->VSvdw, out->VSc, out->Vvdw, out->Vc); } } if (force_flags & GMX_FORCE_ENERGY) { reduce_energies_over_lists(nbat, nnbl, Vvdw, Vc); } } #else { gmx_incons("nbnxn_kernel_simd_4xn called when such kernels " " are not enabled."); } #endif #undef GMX_SIMD_J_UNROLL_SIZE
/* * Note: this file was generated by the Verlet kernel generator for kernel * type 4xn. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include "typedefs.h" #ifdef GMX_NBNXN_SIMD_4XN #ifdef GMX_NBNXN_HALF_WIDTH_SIMD #define GMX_USE_HALF_WIDTH_SIMD_HERE #endif #include "gmx_simd_macros.h" #include "gmx_simd_vec.h" #if !(GMX_SIMD_WIDTH_HERE == 2 || GMX_SIMD_WIDTH_HERE == 4 || GMX_SIMD_WIDTH_HERE == 8) #error "unsupported SIMD width" #endif #define GMX_SIMD_J_UNROLL_SIZE 1 #include "nbnxn_kernel_simd_4xn.h" #include "../nbnxn_kernel_common.h" #include "gmx_omp_nthreads.h" #include "types/force_flags.h" /* * ! \brief Kinds of electrostatic treatments in SIMD Verlet kernels */ enum { coultRF, coultTAB, coultTAB_TWIN, coultEWALD, coultEWALD_TWIN, coultNR }; /* Declare and define the kernel function pointer lookup tables. */ static p_nbk_func_ener p_nbk_ener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_ener, nbnxn_kernel_simd_4xn_rf_comb_lb_ener, nbnxn_kernel_simd_4xn_rf_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_comb_none_ener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_comb_none_ener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_ener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_ener, }, }; static p_nbk_func_ener p_nbk_energrp[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_energrp, nbnxn_kernel_simd_4xn_rf_comb_lb_energrp, nbnxn_kernel_simd_4xn_rf_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_tab_twin_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_comb_none_energrp, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_energrp, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_energrp, }, }; static p_nbk_func_noener p_nbk_noener[coultNR][ljcrNR] = { { nbnxn_kernel_simd_4xn_rf_comb_geom_noener, nbnxn_kernel_simd_4xn_rf_comb_lb_noener, nbnxn_kernel_simd_4xn_rf_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_comb_none_noener, }, { nbnxn_kernel_simd_4xn_tab_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_tab_twin_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_comb_none_noener, }, { nbnxn_kernel_simd_4xn_ewald_twin_comb_geom_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_lb_noener, nbnxn_kernel_simd_4xn_ewald_twin_comb_none_noener, }, }; static void reduce_group_energies(int ng, int ng_2log, const real * VSvdw, const real * VSc, real * Vvdw, real * Vc) { const int unrollj = GMX_SIMD_WIDTH_HERE / GMX_SIMD_J_UNROLL_SIZE; const int unrollj_half = unrollj / 2; int ng_p2, i, j, j0, j1, c, s; ng_p2 = (1 << ng_2log); /* * The size of the x86 SIMD energy group buffer array is: * ng*ng*ng_p2*unrollj_half*simd_width */ for (i = 0; i < ng; i++) { for 
(j = 0; j < ng; j++) { Vvdw[i * ng + j] = 0; Vc[i * ng + j] = 0; } for (j1 = 0; j1 < ng; j1++) { for (j0 = 0; j0 < ng; j0++) { c = ((i * ng + j1) * ng_p2 + j0) * unrollj_half * unrollj; for (s = 0; s < unrollj_half; s++) { Vvdw[i * ng + j0] += VSvdw[c + 0]; Vvdw[i * ng + j1] += VSvdw[c + 1]; Vc[i * ng + j0] += VSc[c + 0]; Vc[i * ng + j1] += VSc[c + 1]; c += unrollj + 2; } } } } } #else /* GMX_NBNXN_SIMD_4XN */ #include "gmx_fatal.h" #endif /* GMX_NBNXN_SIMD_4XN */ void nbnxn_kernel_simd_4xn(nbnxn_pairlist_set_t * nbl_list, const nbnxn_atomdata_t * nbat, const interaction_const_t * ic, int ewald_excl, rvec * shift_vec, int force_flags, int clearF, real * fshift, real * Vc, real * Vvdw) #ifdef GMX_NBNXN_SIMD_4XN { int nnbl; nbnxn_pairlist_t **nbl; int coult; int nb; nnbl = nbl_list->nnbl; nbl = nbl_list->nbl; if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT) { coult = coultRF; } else { if (ewald_excl == ewaldexclTable) { if (ic->rcoulomb == ic->rvdw) { coult = coultTAB; } else { coult = coultTAB_TWIN; } } else { if (ic->rcoulomb == ic->rvdw) { coult = coultEWALD; } else { coult = coultEWALD_TWIN; } } } #pragma omp parallel for schedule(static) num_threads(gmx_omp_nthreads_get(emntNonbonded)) for (nb = 0; nb < nnbl; nb++) { nbnxn_atomdata_output_t *out; real *fshift_p; out = &nbat->out[nb]; if (clearF == enbvClearFYes) { clear_f(nbat, nb, out->f); } if ((force_flags & GMX_FORCE_VIRIAL) && nnbl == 1) { fshift_p = fshift; } else { fshift_p = out->fshift; if (clearF == enbvClearFYes) { clear_fshift(fshift_p); } } if (!(force_flags & GMX_FORCE_ENERGY)) { /* Don't calculate energies */ p_nbk_noener[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p); } else if (out->nV == 1) { /* No energy groups */ out->Vvdw[0] = 0; out->Vc[0] = 0; p_nbk_ener[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->Vvdw, out->Vc); } else { /* Calculate energy group contributions */ int i; for (i = 0; i < out->nVS; i++) { out->VSvdw[i] = 0; } for (i = 0; i < out->nVS; i++) { out->VSc[i] = 0; } p_nbk_energrp[coult][nbat->comb_rule] (nbl[nb], nbat, ic, shift_vec, out->f, fshift_p, out->VSvdw, out->VSc); reduce_group_energies(nbat->nenergrp, nbat->neg_2log, out->VSvdw, out->VSc, out->Vvdw, out->Vc); } } if (force_flags & GMX_FORCE_ENERGY) { reduce_energies_over_lists(nbat, nnbl, Vvdw, Vc); } } #else { gmx_incons("nbnxn_kernel_simd_4xn called when such kernels " " are not enabled."); } #endif #undef GMX_SIMD_J_UNROLL_SIZE
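The OpenMP difference between the two formatted variants above is a single pragma over the pair-list loop. That loop is safe to parallelize statically because every list nb accumulates forces and energies into its own nbnxn_atomdata_output_t buffer, with a reduction over buffers afterwards. A stripped-down sketch of the same pattern, with placeholder types rather than the GROMACS API:

#include <stddef.h>

/* Placeholder per-list output buffer; the real nbnxn_atomdata_output_t
 * holds forces, shift forces and energy accumulators. */
typedef struct { double *f; double Vc; } list_out_t;

/* One thread per pair list, each writing only its own buffer. */
static void run_lists(int nnbl, list_out_t *out, size_t nf,
                      void (*kernel)(int nb, list_out_t *o))
{
    int nb;
#pragma omp parallel for schedule(static)
    for (nb = 0; nb < nnbl; nb++) {
        size_t i;
        for (i = 0; i < nf; i++)   /* clear this list's force buffer */
            out[nb].f[i] = 0.0;
        out[nb].Vc = 0.0;
        kernel(nb, &out[nb]);      /* no cross-list writes, so no locks */
    }
    /* A serial reduction over the nnbl buffers follows, in the same
     * spirit as reduce_energies_over_lists() in the code above. */
}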
threshold.c
/* Copyright 2014. The Regents of the University of California. * Copyright 2015-2017. Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de> * 2015-2016 Jon Tamir <jtamir@eecs.berkeley.edu> * 2015 Frank Ong <frankong@berkeley.edu> */ #include <stdbool.h> #include <complex.h> #include "num/flpmath.h" #include "num/multind.h" #include "num/init.h" #include "iter/prox.h" #include "iter/thresh.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/opts.h" #include "lowrank/lrthresh.h" #include "linops/waveop.h" #include "dfwavelet/prox_dfwavelet.h" // FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input #ifndef DIMS #define DIMS 16 #endif // FIXME: consider moving this to a more accessible location? static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in) { long minsize[D]; md_singleton_dims(D, minsize); long course_scale[3] = MD_INIT_ARRAY(3, 16); md_copy_dims(3, minsize, course_scale); unsigned int wflags = 7; // FIXME for (unsigned int i = 0; i < 3; i++) if (dims[i] < minsize[i]) wflags = MD_CLEAR(wflags, i); long strs[D]; md_calc_strides(D, strs, dims, CFL_SIZE); const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false); const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in) { long blkdims[MAX_LEV][D]; int levels = llr_blkdims(blkdims, ~flags, dims, llrblk); UNUSED(levels); const struct operator_p_s* p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in) { long minsize[3]; md_singleton_dims(3, minsize); long coarse_scale[3] = MD_INIT_ARRAY(3, 16); md_min_dims(3, ~0u, minsize, dims, coarse_scale); complex float res[3]; res[0] = 1.; res[1] = 1.; res[2] = 1.; assert(3 == dims[TE_DIM]); const struct operator_p_s* p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in) { long size = md_calc_size(DIMS, dims) * 2; const float* inf = (const float*)in; float* outf = (float*)out; #pragma omp parallel for for (long i = 0; i < size; i++) outf[i] = inf[i] > lambda ? 
inf[i] : 0.; } static const char usage_str[] = "lambda <input> <output>"; static const char help_str[] = "Perform (soft) thresholding with parameter lambda."; int main_threshold(int argc, char* argv[]) { unsigned int flags = 0; enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE; int llrblk = 8; const struct opt_s opts[] = { OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"), OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"), OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"), OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"), OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"), OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"), }; cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); const int N = DIMS; long dims[N]; complex float* idata = load_cfl(argv[2], N, dims); complex float* odata = create_cfl(argv[3], N, dims); float lambda = atof(argv[1]); switch (th_type) { case WAV: wthresh(N, dims, lambda, flags, odata, idata); break; case LLR: lrthresh(N, dims, llrblk, lambda, flags, odata, idata); break; case DFW: dfthresh(N, dims, lambda, odata, idata); break; case HARD: hard_thresh(N, dims, lambda, odata, idata); break; default: md_zsoftthresh(N, dims, lambda, flags, odata, idata); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); return 0; }
#include <stdbool.h> #include <complex.h> #include "num/flpmath.h" #include "num/multind.h" #include "num/init.h" #include "iter/prox.h" #include "iter/thresh.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/opts.h" #include "lowrank/lrthresh.h" #include "linops/waveop.h" #include "dfwavelet/prox_dfwavelet.h" // FIXME:lowrank interface should not be coupled to mri.h-- it should take D as an input #ifndef DIMS #define DIMS 16 #endif // FIXME:consider moving this to a more accessible location ? static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float *out, const complex float *in) { long minsize[D]; md_singleton_dims(D, minsize); long course_scale[3] = MD_INIT_ARRAY(3, 16); md_copy_dims(3, minsize, course_scale); unsigned int wflags = 7; //FIXME for (unsigned int i = 0; i < 3; i++) if (dims[i] < minsize[i]) wflags = MD_CLEAR(wflags, i); long strs[D]; md_calc_strides(D, strs, dims, CFL_SIZE); const struct linop_s *w = linop_wavelet_create(D, wflags, dims, strs, minsize, false); const struct operator_p_s *p = prox_unithresh_create(D, w, lambda, flags); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float *out, const complex float *in) { long blkdims[MAX_LEV][D]; int levels = llr_blkdims(blkdims, ~flags, dims, llrblk); UNUSED(levels); const struct operator_p_s *p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float *out, const complex float *in) { long minsize[3]; md_singleton_dims(3, minsize); long coarse_scale[3] = MD_INIT_ARRAY(3, 16); md_min_dims(3, ~0u, minsize, dims, coarse_scale); complex float res[3]; res[0] = 1.; res[1] = 1.; res[2] = 1.; assert(3 == dims[TE_DIM]); const struct operator_p_s *p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float *out, const complex float *in) { long size = md_calc_size(DIMS, dims) * 2; const float *inf = (const float *)in; float *outf = (float *)out; for (long i = 0; i < size; i++) outf[i] = inf[i] > lambda ? 
inf[i] : 0.; } static const char usage_str[] = "lambda <input> <output>"; static const char help_str[] = "Perform (soft) thresholding with parameter lambda."; int main_threshold(int argc, char *argv[]) { unsigned int flags = 0; enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE; int llrblk = 8; const struct opt_s opts[] = { OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"), OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"), OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"), OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"), OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"), OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"), }; cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); const int N = DIMS; long dims[N]; complex float *idata = load_cfl(argv[2], N, dims); complex float *odata = create_cfl(argv[3], N, dims); float lambda = atof(argv[1]); switch (th_type) { case WAV: wthresh(N, dims, lambda, flags, odata, idata); break; case LLR: lrthresh(N, dims, llrblk, lambda, flags, odata, idata); break; case DFW: dfthresh(N, dims, lambda, odata, idata); break; case HARD: hard_thresh(N, dims, lambda, odata, idata); break; default: md_zsoftthresh(N, dims, lambda, flags, odata, idata); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); return 0; }
#include <stdbool.h> #include <complex.h> #include "num/flpmath.h" #include "num/multind.h" #include "num/init.h" #include "iter/prox.h" #include "iter/thresh.h" #include "misc/mmio.h" #include "misc/misc.h" #include "misc/debug.h" #include "misc/opts.h" #include "lowrank/lrthresh.h" #include "linops/waveop.h" #include "dfwavelet/prox_dfwavelet.h" // FIXME:lowrank interface should not be coupled to mri.h-- it should take D as an input #ifndef DIMS #define DIMS 16 #endif // FIXME:consider moving this to a more accessible location ? static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float *out, const complex float *in) { long minsize[D]; md_singleton_dims(D, minsize); long course_scale[3] = MD_INIT_ARRAY(3, 16); md_copy_dims(3, minsize, course_scale); unsigned int wflags = 7; //FIXME for (unsigned int i = 0; i < 3; i++) if (dims[i] < minsize[i]) wflags = MD_CLEAR(wflags, i); long strs[D]; md_calc_strides(D, strs, dims, CFL_SIZE); const struct linop_s *w = linop_wavelet_create(D, wflags, dims, strs, minsize, false); const struct operator_p_s *p = prox_unithresh_create(D, w, lambda, flags); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float *out, const complex float *in) { long blkdims[MAX_LEV][D]; int levels = llr_blkdims(blkdims, ~flags, dims, llrblk); UNUSED(levels); const struct operator_p_s *p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float *out, const complex float *in) { long minsize[3]; md_singleton_dims(3, minsize); long coarse_scale[3] = MD_INIT_ARRAY(3, 16); md_min_dims(3, ~0u, minsize, dims, coarse_scale); complex float res[3]; res[0] = 1.; res[1] = 1.; res[2] = 1.; assert(3 == dims[TE_DIM]); const struct operator_p_s *p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false); operator_p_apply(p, 1., D, dims, out, D, dims, in); operator_p_free(p); } static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float *out, const complex float *in) { long size = md_calc_size(DIMS, dims) * 2; const float *inf = (const float *)in; float *outf = (float *)out; #pragma omp parallel for for (long i = 0; i < size; i++) outf[i] = inf[i] > lambda ? 
inf[i] : 0.; } static const char usage_str[] = "lambda <input> <output>"; static const char help_str[] = "Perform (soft) thresholding with parameter lambda."; int main_threshold(int argc, char *argv[]) { unsigned int flags = 0; enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE; int llrblk = 8; const struct opt_s opts[] = { OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"), OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"), OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"), OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"), OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"), OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"), }; cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts); num_init(); const int N = DIMS; long dims[N]; complex float *idata = load_cfl(argv[2], N, dims); complex float *odata = create_cfl(argv[3], N, dims); float lambda = atof(argv[1]); switch (th_type) { case WAV: wthresh(N, dims, lambda, flags, odata, idata); break; case LLR: lrthresh(N, dims, llrblk, lambda, flags, odata, idata); break; case DFW: dfthresh(N, dims, lambda, odata, idata); break; case HARD: hard_thresh(N, dims, lambda, odata, idata); break; default: md_zsoftthresh(N, dims, lambda, flags, odata, idata); } unmap_cfl(N, dims, idata); unmap_cfl(N, dims, odata); return 0; }
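For reference, hard_thresh() above treats the complex array as 2*size floats and keeps a component only when it exceeds lambda one-sidedly (inf[i] > lambda), while the default branch (md_zsoftthresh) shrinks by magnitude. The textbook magnitude-based pointwise rules look like this; a small illustration on plain floats, not the BART API:

#include <math.h>

/* Hard rule: keep a coefficient untouched if its magnitude exceeds
 * lambda, otherwise zero it. */
static float hard_rule(float x, float lambda)
{
    return (fabsf(x) > lambda) ? x : 0.f;
}

/* Soft rule: shrink every coefficient toward zero by lambda. */
static float soft_rule(float x, float lambda)
{
    const float m = fabsf(x) - lambda;
    return (m > 0.f) ? copysignf(m, x) : 0.f;
}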